From 7a76b40dc6a9718e801b4d8b8c8455e50a8918cb Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 21 Nov 2022 15:29:36 +0200 Subject: [PATCH 01/30] Move import queue out of `sc-network` Add supplementary asynchronous API for the import queue which means it can be run as an independent task and communicated with through the `ImportQueueService`. This commit removes block and justification imports from `sc-network` and provides `ChainSync` with a handle to import queue so it can import blocks and justifications. Polling of the import queue is moved completely out of `sc-network` and `sc_consensus::Link` is implemented for `ChainSyncInterfaceHandled` so the import queue can still influence the syncing process. --- Cargo.lock | 2 + client/consensus/common/Cargo.toml | 1 + client/consensus/common/src/import_queue.rs | 23 ++- .../common/src/import_queue/basic_queue.rs | 69 ++++++- .../common/src/import_queue/buffered_link.rs | 32 +++- .../consensus/common/src/import_queue/mock.rs | 46 +++++ client/network/common/src/sync.rs | 13 +- client/network/src/behaviour.rs | 31 +-- client/network/src/config.rs | 7 - client/network/src/lib.rs | 9 +- client/network/src/protocol.rs | 107 +---------- client/network/src/service.rs | 88 +-------- client/network/src/service/metrics.rs | 10 - .../network/src/service/tests/chain_sync.rs | 106 +++++++---- client/network/src/service/tests/mod.rs | 40 +++- client/network/sync/Cargo.toml | 1 + client/network/sync/src/lib.rs | 177 ++++++++++++++++-- client/network/sync/src/mock.rs | 7 +- client/network/sync/src/service/chain_sync.rs | 53 ++++++ client/network/sync/src/service/mock.rs | 33 +++- client/network/sync/src/tests.rs | 3 + client/network/test/src/lib.rs | 12 +- client/service/src/builder.rs | 6 +- client/service/src/chain_ops/import_blocks.rs | 2 +- 24 files changed, 560 insertions(+), 318 deletions(-) create mode 100644 client/consensus/common/src/import_queue/mock.rs diff --git a/Cargo.lock b/Cargo.lock index 
ca0ebee0ac475..2265d4192cd82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7737,6 +7737,7 @@ dependencies = [ "futures-timer", "libp2p", "log", + "mockall", "parking_lot 0.12.1", "sc-client-api", "sc-utils", @@ -8347,6 +8348,7 @@ dependencies = [ "sp-runtime", "sp-test-primitives", "sp-tracing", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", ] diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 971ee71ab8040..b61c6a4334285 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -18,6 +18,7 @@ futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" +mockall = "0.11.2" parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0.30" diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index 3741fa99663cd..d49b240ef3489 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -53,6 +53,7 @@ pub type DefaultImportQueue = mod basic_queue; pub mod buffered_link; +pub mod mock; /// Shared block import struct used by the queue. pub type BoxBlockImport = @@ -105,10 +106,10 @@ pub trait Verifier: Send + Sync { /// Blocks import queue API. /// /// The `import_*` methods can be called in order to send elements for the import queue to verify. -/// Afterwards, call `poll_actions` to determine how to respond to these elements. -pub trait ImportQueue: Send { +pub trait ImportQueueService: Send { /// Import bunch of blocks. fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); + /// Import block justifications. fn import_justifications( &mut self, @@ -117,12 +118,26 @@ pub trait ImportQueue: Send { number: NumberFor, justifications: Justifications, ); - /// Polls for actions to perform on the network. 
- /// +} + +#[async_trait::async_trait] +pub trait ImportQueue: Send { + /// Get a copy of the handle to [`ImportQueueService`]. + fn service(&self) -> Box>; + + /// Get a reference to the handle to [`ImportQueueService`]. + fn service_ref(&mut self) -> &mut dyn ImportQueueService; + /// This method should behave in a way similar to `Future::poll`. It can register the current /// task and notify later when more actions are ready to be polled. To continue the comparison, /// it is as if this method always returned `Poll::Pending`. fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); + + /// Start asynchronous runner for import queue. + /// + /// Takes an object implementing [`Link`] which allows the import queue to + /// influence the synchronization process. + async fn run(self, link: Box>); } /// Hooks that the verification queue can use to influence the synchronization diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 0e607159b75c3..20e8d262cacda 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -34,7 +34,8 @@ use crate::{ import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, - BoxJustificationImport, ImportQueue, IncomingBlock, Link, RuntimeOrigin, Verifier, + BoxJustificationImport, ImportQueue, ImportQueueService, IncomingBlock, Link, + RuntimeOrigin, Verifier, }, metrics::Metrics, }; @@ -42,10 +43,8 @@ use crate::{ /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. pub struct BasicQueue { - /// Channel to send justification import messages to the background task. - justification_sender: TracingUnboundedSender>, - /// Channel to send block import messages to the background task. 
- block_import_sender: TracingUnboundedSender>, + /// Handle for sending justification and block import messages to the background task. + handle: BasicQueueHandle, /// Results coming from the worker task. result_port: BufferedLinkReceiver, _phantom: PhantomData, @@ -54,8 +53,7 @@ pub struct BasicQueue { impl Drop for BasicQueue { fn drop(&mut self) { // Flush the queue and close the receiver to terminate the future. - self.justification_sender.close_channel(); - self.block_import_sender.close_channel(); + self.handle.close(); self.result_port.close(); } } @@ -95,11 +93,37 @@ impl BasicQueue { future.boxed(), ); - Self { justification_sender, block_import_sender, result_port, _phantom: PhantomData } + Self { + handle: BasicQueueHandle::new(justification_sender, block_import_sender), + result_port, + _phantom: PhantomData, + } } } -impl ImportQueue for BasicQueue { +#[derive(Clone)] +struct BasicQueueHandle { + /// Channel to send justification import messages to the background task. + justification_sender: TracingUnboundedSender>, + /// Channel to send block import messages to the background task. + block_import_sender: TracingUnboundedSender>, +} + +impl BasicQueueHandle { + pub fn new( + justification_sender: TracingUnboundedSender>, + block_import_sender: TracingUnboundedSender>, + ) -> Self { + Self { justification_sender, block_import_sender } + } + + pub fn close(&mut self) { + self.justification_sender.close_channel(); + self.block_import_sender.close_channel(); + } +} + +impl ImportQueueService for BasicQueueHandle { fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if blocks.is_empty() { return @@ -138,12 +162,39 @@ impl ImportQueue for BasicQueue } } } +} + +#[async_trait::async_trait] +impl ImportQueue for BasicQueue { + /// Get handle to [`ImportQueueService`]. + fn service(&self) -> Box> { + Box::new(self.handle.clone()) + } + /// Get a reference to the handle to [`ImportQueueService`]. 
+ fn service_ref(&mut self) -> &mut dyn ImportQueueService { + &mut self.handle + } + + /// Poll actions from network. fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { if self.result_port.poll_actions(cx, link).is_err() { log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); } } + + /// Start asynchronous runner for import queue. + /// + /// Takes an object implementing [`Link`] which allows the import queue to + /// influence the synchronization process. + async fn run(mut self, mut link: Box>) { + loop { + if let Err(_) = self.result_port.next_action(&mut *link).await { + log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); + return + } + } + } } /// Messages destinated to the background worker. diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index 5d418dddf0853..e6d3b212fdbac 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -80,7 +80,7 @@ impl Clone for BufferedLinkSender { } /// Internal buffered message. -enum BlockImportWorkerMsg { +pub enum BlockImportWorkerMsg { BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>), JustificationImported(RuntimeOrigin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), @@ -122,6 +122,18 @@ pub struct BufferedLinkReceiver { } impl BufferedLinkReceiver { + /// Send action for the synchronization to perform. 
+ pub fn send_actions(&mut self, msg: BlockImportWorkerMsg, link: &mut dyn Link) { + match msg { + BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => + link.blocks_processed(imported, count, results), + BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => + link.justification_imported(who, &hash, number, success), + BlockImportWorkerMsg::RequestJustification(hash, number) => + link.request_justification(&hash, number), + } + } + /// Polls for the buffered link actions. Any enqueued action will be propagated to the link /// passed as parameter. /// @@ -138,15 +150,17 @@ impl BufferedLinkReceiver { Poll::Pending => break Ok(()), }; - match msg { - BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => - link.blocks_processed(imported, count, results), - BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => - link.justification_imported(who, &hash, number, success), - BlockImportWorkerMsg::RequestJustification(hash, number) => - link.request_justification(&hash, number), - } + self.send_actions(msg, &mut *link); + } + } + + /// Poll next element from import queue and send the corresponding action command over the link. + pub async fn next_action(&mut self, link: &mut dyn Link) -> Result<(), ()> { + if let Some(msg) = self.rx.next().await { + self.send_actions(msg, link); + return Ok(()) } + Err(()) } /// Close the channel. diff --git a/client/consensus/common/src/import_queue/mock.rs b/client/consensus/common/src/import_queue/mock.rs new file mode 100644 index 0000000000000..67deee9514a1c --- /dev/null +++ b/client/consensus/common/src/import_queue/mock.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::*; + +mockall::mock! { + pub ImportQueueHandle {} + + impl ImportQueueService for ImportQueueHandle { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); + fn import_justifications( + &mut self, + who: RuntimeOrigin, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ); + } +} + +mockall::mock! { + pub ImportQueue {} + + #[async_trait::async_trait] + impl ImportQueue for ImportQueue { + fn service(&self) -> Box>; + fn service_ref(&mut self) -> &mut dyn ImportQueueService; + fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &mut dyn Link); + async fn run(self, link: Box>); + } +} diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index bed9935698769..bb531c8aedd16 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -317,6 +317,12 @@ pub trait ChainSync: Send { response: BlockResponse, ) -> Result, BadPeer>; + /// Process received block data. + fn process_block_response_data( + &mut self, + blocks_to_import: Result, BadPeer>, + ); + /// Handle a response from the remote to a justification request that we made. /// /// `request` must be the original request that triggered `response`. 
@@ -378,7 +384,7 @@ pub trait ChainSync: Send { /// Call when a peer has disconnected. /// Canceled obsolete block request may result in some blocks being ready for /// import, so this functions checks for such blocks and returns them. - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn peer_disconnected(&mut self, who: &PeerId); /// Return some key metrics. fn metrics(&self) -> Metrics; @@ -395,7 +401,10 @@ pub trait ChainSync: Send { /// Internally calls [`ChainSync::poll_block_announce_validation()`] and /// this function should be polled until it returns [`Poll::Pending`] to /// consume all pending events. - fn poll(&mut self, cx: &mut std::task::Context) -> Poll>; + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll>; /// Send block request to peer fn send_block_request(&mut self, who: PeerId, request: BlockRequest); diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 48d6127f642c3..3a977edbca574 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -32,7 +32,6 @@ use libp2p::{ NetworkBehaviour, }; -use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ protocol::{ event::DhtEvent, @@ -43,18 +42,14 @@ use sc_network_common::{ }; use sc_peerset::{PeersetHandle, ReputationChange}; use sp_blockchain::HeaderBackend; -use sp_consensus::BlockOrigin; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - Justifications, -}; +use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, time::Duration}; pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut")] +#[behaviour(out_event = "BehaviourOut")] pub struct Behaviour where B: BlockT, @@ -72,10 +67,7 @@ where } /// Event generated by `Behaviour`. 
-pub enum BehaviourOut { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), - +pub enum BehaviourOut { /// Started a random iterative Kademlia discovery query. RandomKademliaStarted, @@ -107,10 +99,7 @@ pub enum BehaviourOut { }, /// A request protocol handler issued reputation changes for the given peer. - ReputationChanges { - peer: PeerId, - changes: Vec, - }, + ReputationChanges { peer: PeerId, changes: Vec }, /// Opened a substream with the given node with the given notifications protocol. /// @@ -306,13 +295,9 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl From> for BehaviourOut { +impl From> for BehaviourOut { fn from(event: CustomMessageOutcome) -> Self { match event { - CustomMessageOutcome::BlockImport(origin, blocks) => - BehaviourOut::BlockImport(origin, blocks), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - BehaviourOut::JustificationImport(origin, hash, nb, justification), CustomMessageOutcome::NotificationStreamOpened { remote, protocol, @@ -344,7 +329,7 @@ impl From> for BehaviourOut { } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { request_responses::Event::InboundRequest { peer, protocol, result } => @@ -357,14 +342,14 @@ impl From for BehaviourOut { } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: peer_info::PeerInfoEvent) -> Self { let peer_info::PeerInfoEvent::Identified { peer_id, info } = event; BehaviourOut::PeerIdentify { peer_id, info } } } -impl From for BehaviourOut { +impl From for BehaviourOut { fn from(event: DiscoveryOut) -> Self { match event { DiscoveryOut::UnroutablePeer(_peer_id) => { diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 50d8e2baba60f..fb6d060344bdc 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -40,7 +40,6 @@ use libp2p::{ 
multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; -use sc_consensus::ImportQueue; use sc_network_common::{ config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, sync::ChainSync, @@ -83,12 +82,6 @@ where /// name on the wire. pub fork_id: Option, - /// Import queue to use. - /// - /// The import queue is the component that verifies that blocks received from other nodes are - /// valid. - pub import_queue: Box>, - /// Instance of chain sync implementation. pub chain_sync: Box>, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index f3faa44ee6dbd..f185458e0dace 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -258,6 +258,7 @@ pub mod network_state; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::PeerInfo; +use sc_consensus::{JustificationSyncLink, Link}; pub use sc_network_common::{ protocol::{ event::{DhtEvent, Event}, @@ -297,11 +298,15 @@ const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; /// Abstraction over syncing-related services pub trait ChainSyncInterface: - NetworkSyncForkRequest> + Send + Sync + NetworkSyncForkRequest> + JustificationSyncLink + Link + Send + Sync { } impl ChainSyncInterface for T where - T: NetworkSyncForkRequest> + Send + Sync + T: NetworkSyncForkRequest> + + JustificationSyncLink + + Link + + Send + + Sync { } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 8c1dd39b49be3..10eb31b595253 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -29,32 +29,26 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, info, log, trace, warn, Level}; +use log::{debug, error, log, trace, warn, Level}; use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; 
-use sc_consensus::import_queue::{ - BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, -}; use sc_network_common::{ config::NonReservedPeerMode, error, protocol::{role::Roles, ProtocolName}, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake, BlockData, BlockResponse, BlockState}, - BadPeer, ChainSync, ImportResult, OnBlockData, PollBlockAnnounceValidation, PollResult, - SyncStatus, + BadPeer, ChainSync, PollBlockAnnounceValidation, SyncStatus, }, utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, - Justifications, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -481,12 +475,7 @@ where } if let Some(_peer_data) = self.peers.remove(&peer) { - if let Some(OnBlockData::Import(origin, blocks)) = - self.chain_sync.peer_disconnected(&peer) - { - self.pending_messages - .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); - } + self.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); Ok(()) } else { @@ -785,25 +774,13 @@ where }], }, ); + self.chain_sync.process_block_response_data(blocks_to_import); if is_best { self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); } - match blocks_to_import { - Ok(OnBlockData::Import(origin, blocks)) => - CustomMessageOutcome::BlockImport(origin, blocks), - Ok(OnBlockData::Request(peer, req)) => { - self.chain_sync.send_block_request(peer, req); - CustomMessageOutcome::None - }, - Ok(OnBlockData::Continue) => CustomMessageOutcome::None, - Err(BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - }, - } + CustomMessageOutcome::None } /// Call this when a block has been finalized. 
The sync layer may have some additional @@ -812,58 +789,6 @@ where self.chain_sync.on_block_finalized(&hash, *header.number()) } - /// Request a justification for the given block. - /// - /// Uses `protocol` to queue a new justification request and tries to dispatch all pending - /// requests. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.chain_sync.request_justification(hash, number) - } - - /// Clear all pending justification requests. - pub fn clear_justification_requests(&mut self) { - self.chain_sync.clear_justification_requests(); - } - - /// A batch of blocks have been processed, with or without errors. - /// Call this when a batch of blocks have been processed by the importqueue, with or without - /// errors. - pub fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) { - let results = self.chain_sync.on_blocks_processed(imported, count, results); - for result in results { - match result { - Ok((id, req)) => self.chain_sync.send_block_request(id, req), - Err(BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer(id, repu) - }, - } - } - } - - /// Call this when a justification has been processed by the import queue, with or without - /// errors. - pub fn justification_import_result( - &mut self, - who: PeerId, - hash: B::Hash, - number: NumberFor, - success: bool, - ) { - self.chain_sync.on_justification_import(hash, number, success); - if !success { - info!("💔 Invalid justification provided by {} for #{}", who, hash); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - self.peerset_handle - .report_peer(who, sc_peerset::ReputationChange::new_fatal("Invalid justification")); - } - } - /// Set whether the syncing peers set is in reserved-only mode. 
pub fn set_reserved_only(&self, reserved_only: bool) { self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); @@ -997,8 +922,6 @@ where #[derive(Debug)] #[must_use] pub enum CustomMessageOutcome { - BlockImport(BlockOrigin, Vec>), - JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, @@ -1106,23 +1029,9 @@ where // Process any received requests received from `NetworkService` and // check if there is any block announcement validation finished. while let Poll::Ready(result) = self.chain_sync.poll(cx) { - match result { - PollResult::Import(import) => self.pending_messages.push_back(match import { - ImportResult::BlockImport(origin, blocks) => - CustomMessageOutcome::BlockImport(origin, blocks), - ImportResult::JustificationImport(origin, hash, number, justifications) => - CustomMessageOutcome::JustificationImport( - origin, - hash, - number, - justifications, - ), - }), - PollResult::Announce(announce) => - match self.process_block_announce_validation_result(announce) { - CustomMessageOutcome::None => {}, - outcome => self.pending_messages.push_back(outcome), - }, + match self.process_block_announce_validation_result(result) { + CustomMessageOutcome::None => {}, + outcome => self.pending_messages.push_back(outcome), } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 7d756ed2d1e88..1b6d603503bf1 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -54,7 +54,6 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ config::{MultiaddrWithPeerId, TransportConfig}, error::Error, @@ -450,7 +449,6 @@ where is_major_syncing, network_service: swarm, service, - import_queue: 
params.import_queue, from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, @@ -748,13 +746,11 @@ impl sc_consensus::JustificationSyncLink for NetworkSe /// On success, the justification will be passed to the import queue that was part at /// initialization as part of the configuration. fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); + let _ = self.chain_sync_service.request_justification(hash, number); } fn clear_justification_requests(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + let _ = self.chain_sync_service.clear_justification_requests(); } } @@ -1208,8 +1204,6 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// /// Each entry corresponds to a method of `NetworkService`. enum ServiceToWorkerMsg { - RequestJustification(B::Hash, NumberFor), - ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), GetValue(KademliaKey), PutValue(KademliaKey, Vec), @@ -1261,8 +1255,6 @@ where service: Arc>, /// The *actual* network. network_service: Swarm>, - /// The import queue that was passed at initialization. - import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. @@ -1290,10 +1282,6 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { let this = &mut *self; - // Poll the import queue for actions to perform. - this.import_queue - .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); - // At the time of writing of this comment, due to a high volume of messages, the network // worker sometimes takes a long time to process the loop below. When that happens, the // rest of the polling is frozen. 
In order to avoid negative side-effects caused by this @@ -1322,16 +1310,6 @@ where .behaviour_mut() .user_protocol_mut() .announce_block(hash, data), - ServiceToWorkerMsg::RequestJustification(hash, number) => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .request_justification(&hash, number), - ServiceToWorkerMsg::ClearJustificationRequests => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .clear_justification_requests(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1435,23 +1413,6 @@ where match poll_value { Poll::Pending => break, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_blocks_submitted.inc(); - } - this.import_queue.import_blocks(origin, blocks); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport( - origin, - hash, - nb, - justifications, - ))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_justifications_submitted.inc(); - } - this.import_queue.import_justifications(origin, hash, nb, justifications); - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, @@ -1952,51 +1913,6 @@ where { } -// Implementation of `import_queue::Link` trait using the available local variables. 
-struct NetworkLink<'a, B, Client> -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - protocol: &'a mut Swarm>, -} - -impl<'a, B, Client> Link for NetworkLink<'a, B, Client> -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .on_blocks_processed(imported, count, results) - } - fn justification_imported( - &mut self, - who: PeerId, - hash: &B::Hash, - number: NumberFor, - success: bool, - ) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .justification_import_result(who, *hash, number, success); - } - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol - .behaviour_mut() - .user_protocol_mut() - .request_justification(hash, number) - } -} - fn ensure_addresses_consistent_with_transport<'a>( addresses: impl Iterator, transport: &TransportConfig, diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index db1b6f7f6500d..a099bba716eb9 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -53,8 +53,6 @@ pub struct Metrics { pub connections_opened_total: CounterVec, pub distinct_peers_connections_closed_total: Counter, pub distinct_peers_connections_opened_total: Counter, - pub import_queue_blocks_submitted: Counter, - pub import_queue_justifications_submitted: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, pub issued_light_requests: Counter, @@ -103,14 +101,6 @@ impl Metrics { "substrate_sub_libp2p_distinct_peers_connections_opened_total", "Total number of connections opened with distinct peers" )?, registry)?, - import_queue_blocks_submitted: prometheus::register(Counter::new( - "substrate_import_queue_blocks_submitted", - "Number of blocks submitted to the import queue.", - )?, 
registry)?, - import_queue_justifications_submitted: prometheus::register(Counter::new( - "substrate_import_queue_justifications_submitted", - "Number of justifications submitted to the import queue.", - )?, registry)?, incoming_connections_errors_total: prometheus::register(CounterVec::new( Opts::new( "substrate_sub_libp2p_incoming_connections_handshake_errors_total", diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs index b62fb36461860..7ddcc521fb8f0 100644 --- a/client/network/src/service/tests/chain_sync.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -85,27 +85,26 @@ async fn normal_network_poll_no_peers() { #[async_std::test] async fn request_justification() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `request_justification()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - let hash = H256::random(); let number = 1337u64; - chain_sync - .expect_request_justification() + // build `ChainSyncInterface` provider and expect + // `JustificationSyncLink::request_justification()` to be called once + let mut chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + chain_sync_service + .expect_justification_sync_link_request_justification() .withf(move |in_hash, in_number| &hash == in_hash && &number == in_number) .once() .returning(|_, _| ()); + // build `ChainSync` and set default expectations for it + let mut chain_sync = MockChainSync::::new(); + set_default_expecations_no_peers(&mut chain_sync); let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) + .with_chain_sync((Box::new(chain_sync), chain_sync_service)) .build(); // send "request justifiction" message and poll the network
#[async_std::test] async fn clear_justification_requests() { - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = + // build `ChainSyncInterface` provider and expect + // `JustificationSyncLink::clear_justification_requests()` to be called + let mut chain_sync_service = Box::new(MockChainSyncInterface::::new()); - // build `ChainSync` and verify that call to `clear_justification_requests()` is made + chain_sync_service + .expect_justification_sync_link_clear_justification_requests() + .once() + .returning(|| ()); + + // build `ChainSync` and set default expecations for it let mut chain_sync = Box::new(MockChainSync::::new()); - chain_sync.expect_clear_justification_requests().once().returning(|| ()); - set_default_expecations_no_peers(&mut chain_sync); let mut network = TestNetworkBuilder::new() .with_chain_sync((chain_sync, chain_sync_service)) @@ -234,19 +236,13 @@ async fn on_block_finalized() { // and verify that connection to the peer is closed #[async_std::test] async fn invalid_justification_imported() { - struct DummyImportQueue( - Arc< - RwLock< - Option<( - PeerId, - substrate_test_runtime_client::runtime::Hash, - sp_runtime::traits::NumberFor, - )>, - >, - >, - ); + struct DummyImportQueueHandle; - impl sc_consensus::ImportQueue for DummyImportQueue { + impl + sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + > for DummyImportQueueHandle + { fn import_blocks( &mut self, _origin: sp_consensus::BlockOrigin, @@ -264,7 +260,23 @@ async fn invalid_justification_imported() { _justifications: sp_runtime::Justifications, ) { } + } + struct DummyImportQueue( + Arc< + RwLock< + Option<( + PeerId, + substrate_test_runtime_client::runtime::Hash, + sp_runtime::traits::NumberFor, + )>, + >, + >, + DummyImportQueueHandle, + ); + + #[async_trait::async_trait] + impl sc_consensus::ImportQueue for DummyImportQueue { fn poll_actions( &mut self, 
_cx: &mut futures::task::Context, @@ -274,13 +286,40 @@ async fn invalid_justification_imported() { link.justification_imported(peer, &hash, number, false); } } + + fn service( + &self, + ) -> Box< + dyn sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + >, + > { + Box::new(DummyImportQueueHandle {}) + } + + fn service_ref( + &mut self, + ) -> &mut dyn sc_consensus::import_queue::ImportQueueService< + substrate_test_runtime_client::runtime::Block, + > { + &mut self.1 + } + + async fn run( + self, + _link: Box>, + ) { + } } let justification_info = Arc::new(RwLock::new(None)); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (service1, mut event_stream1) = TestNetworkBuilder::new() - .with_import_queue(Box::new(DummyImportQueue(justification_info.clone()))) + .with_import_queue(Box::new(DummyImportQueue( + justification_info.clone(), + DummyImportQueueHandle {}, + ))) .with_listen_addresses(vec![listen_addr.clone()]) .build() .start_network(); @@ -333,6 +372,7 @@ async fn disconnect_peer_using_chain_sync_handle() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (chain_sync_network_provider, chain_sync_network_handle) = sc_network_sync::service::network::NetworkServiceProvider::new(); let handle_clone = chain_sync_network_handle.clone(); @@ -346,7 +386,9 @@ async fn disconnect_peer_using_chain_sync_handle() { Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), 1u32, None, + None, chain_sync_network_handle.clone(), + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -355,7 +397,7 @@ async fn disconnect_peer_using_chain_sync_handle() { let (node1, mut event_stream1) = TestNetworkBuilder::new() 
.with_listen_addresses(vec![listen_addr.clone()]) - .with_chain_sync((Box::new(chain_sync), chain_sync_service)) + .with_chain_sync((Box::new(chain_sync), Box::new(chain_sync_service))) .with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle)) .with_client(client.clone()) .build() diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 1d91fc142672f..4b4cb21411b14 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -21,7 +21,7 @@ use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::Multiaddr; use sc_client_api::{BlockBackend, HeaderBackend}; -use sc_consensus::ImportQueue; +use sc_consensus::{ImportQueue, Link}; use sc_network_common::{ config::{ NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, @@ -91,6 +91,7 @@ impl TestNetwork { struct TestNetworkBuilder { import_queue: Option>>, + link: Option>>, client: Option>, listen_addresses: Vec, set_config: Option, @@ -103,6 +104,7 @@ impl TestNetworkBuilder { pub fn new() -> Self { Self { import_queue: None, + link: None, client: None, listen_addresses: Vec::new(), set_config: None, @@ -208,13 +210,14 @@ impl TestNetworkBuilder { } } - let import_queue = self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - ))); + let mut import_queue = + self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + ))); let protocol_id = ProtocolId::from("test-protocol-name"); let fork_id = Some(String::from("test-fork-id")); @@ -285,15 +288,23 @@ impl TestNetworkBuilder { Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), 
network_config.max_parallel_downloads, None, + None, chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), None, ) .unwrap(); - (Box::new(chain_sync), chain_sync_service) + if let None = self.link { + self.link = Some(Box::new(chain_sync_service.clone())); + } + (Box::new(chain_sync), Box::new(chain_sync_service)) }); + let mut link = self + .link + .unwrap_or(Box::new(sc_network_sync::service::mock::MockChainSyncInterface::new())); let worker = NetworkWorker::< substrate_test_runtime_client::runtime::Block, @@ -307,7 +318,6 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - import_queue, chain_sync, chain_sync_service, metrics_registry: None, @@ -324,6 +334,16 @@ impl TestNetworkBuilder { async_std::task::spawn(async move { let _ = chain_sync_network_provider.run(service).await; }); + async_std::task::spawn(async move { + loop { + futures::future::poll_fn(|cx| { + import_queue.poll_actions(cx, &mut *link); + std::task::Poll::Ready(()) + }) + .await; + async_std::task::sleep(std::time::Duration::from_millis(250)).await; + } + }); TestNetwork::new(worker) } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 841388c7a68ee..8a9db15d8c4c7 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -29,6 +29,7 @@ prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 697445334a073..d91f7f2572aa6 100644 --- 
a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -54,9 +54,12 @@ use futures::{ }; use libp2p::{request_response::OutboundFailure, PeerId}; use log::{debug, error, info, trace, warn}; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; -use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sc_consensus::{ + import_queue::ImportQueueService, BlockImportError, BlockImportStatus, IncomingBlock, +}; use sc_network_common::{ config::{ NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, @@ -71,8 +74,8 @@ use sc_network_common::{ warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, - OpaqueStateResponse, PeerInfo, PeerRequest, PollBlockAnnounceValidation, PollResult, - SyncMode, SyncState, SyncStatus, + OpaqueStateResponse, PeerInfo, PeerRequest, PollBlockAnnounceValidation, SyncMode, + SyncState, SyncStatus, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; @@ -233,6 +236,32 @@ impl Default for AllowedRequests { } } +struct SyncingMetrics { + pub import_queue_blocks_submitted: Counter, + pub import_queue_justifications_submitted: Counter, +} + +impl SyncingMetrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + import_queue_blocks_submitted: register( + Counter::new( + "substrate_import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, + registry, + )?, + import_queue_justifications_submitted: register( + Counter::new( + "substrate_import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, + registry, + )?, + }) + } +} + struct GapSync { blocks: BlockCollection, 
best_queued_number: NumberFor, @@ -311,6 +340,10 @@ pub struct ChainSync { warp_sync_protocol_name: Option, /// Pending responses pending_responses: FuturesUnordered>, + /// Handle to import queue. + import_queue: Box>, + /// Metrics. + metrics: Option, } /// All the data we have about a Peer that we are trying to sync with @@ -961,6 +994,19 @@ where Ok(self.validate_and_queue_blocks(new_blocks, gap)) } + fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>) { + match blocks_to_import { + Ok(OnBlockData::Import(origin, blocks)) => self.import_blocks(origin, blocks), + Ok(OnBlockData::Request(peer, req)) => self.send_block_request(peer, req), + Ok(OnBlockData::Continue) => {}, + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + }, + } + } + fn on_block_justification( &mut self, who: PeerId, @@ -1331,7 +1377,7 @@ where } } - fn peer_disconnected(&mut self, who: &PeerId) -> Option> { + fn peer_disconnected(&mut self, who: &PeerId) { self.blocks.clear_peer_download(who); if let Some(gap_sync) = &mut self.gap_sync { gap_sync.blocks.clear_peer_download(who) @@ -1343,8 +1389,13 @@ where target.peers.remove(who); !target.peers.is_empty() }); + let blocks = self.ready_blocks(); - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + if let Some(OnBlockData::Import(origin, blocks)) = + (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + { + self.import_blocks(origin, blocks); + } } fn metrics(&self) -> Metrics { @@ -1421,22 +1472,56 @@ where .map_err(|error: codec::Error| error.to_string()) } - fn poll(&mut self, cx: &mut std::task::Context) -> Poll> { + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll> { while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { match event { ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { 
self.set_sync_fork_request(peers, &hash, number); }, + ToServiceCommand::RequestJustification(hash, number) => + self.request_justification(&hash, number), + ToServiceCommand::ClearJustificationRequests => self.clear_justification_requests(), + ToServiceCommand::BlocksProcessed(imported, count, results) => { + for result in self.on_blocks_processed(imported, count, results) { + match result { + Ok((id, req)) => self.send_block_request(id, req), + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu) + }, + } + } + }, + ToServiceCommand::JustificationImported(peer, hash, number, success) => { + self.on_justification_import(hash, number, success); + if !success { + info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash); + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + self.network_service.report_peer( + peer, + sc_peerset::ReputationChange::new_fatal("Invalid justification"), + ); + } + }, } } self.process_outbound_requests(); if let Poll::Ready(result) = self.poll_pending_responses(cx) { - return Poll::Ready(PollResult::Import(result)) + match result { + ImportResult::BlockImport(origin, blocks) => self.import_blocks(origin, blocks), + ImportResult::JustificationImport(who, hash, number, justifications) => + self.import_justifications(who, hash, number, justifications), + } } if let Poll::Ready(announce) = self.poll_block_announce_validation(cx) { - return Poll::Ready(PollResult::Announce(announce)) + return Poll::Ready(announce) } Poll::Pending @@ -1494,11 +1579,13 @@ where block_announce_validator: Box + Send>, max_parallel_downloads: u32, warp_sync_provider: Option>>, + metrics_registry: Option<&Registry>, network_service: service::network::NetworkServiceHandle, + import_queue: Box>, block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, 
warp_sync_protocol_name: Option, - ) -> Result<(Self, Box>, NonDefaultSetConfig), ClientError> { + ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); let block_announce_config = Self::get_block_announce_proto_config( protocol_id, @@ -1544,10 +1631,22 @@ where .clone() .into(), pending_responses: Default::default(), + import_queue, + metrics: if let Some(r) = &metrics_registry { + match SyncingMetrics::register(r) { + Ok(metrics) => Some(metrics), + Err(err) => { + error!(target: "sync", "Failed to register metrics for ChainSync: {err:?}"); + None + }, + } + } else { + None + }, }; sync.reset_sync_start_point()?; - Ok((sync, Box::new(ChainSyncInterfaceHandle::new(tx)), block_announce_config)) + Ok((sync, ChainSyncInterfaceHandle::new(tx), block_announce_config)) } /// Returns the median seen block number. @@ -2173,8 +2272,10 @@ where if request.fields == BlockAttributes::JUSTIFICATION { match self.on_block_justification(peer_id, block_response) { Ok(OnBlockJustification::Nothing) => None, - Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => - Some(ImportResult::JustificationImport(peer, hash, number, justifications)), + Ok(OnBlockJustification::Import { peer, hash, number, justifications }) => { + self.import_justifications(peer, hash, number, justifications); + None + }, Err(BadPeer(id, repu)) => { self.network_service .disconnect_peer(id, self.block_announce_protocol_name.clone()); @@ -2184,8 +2285,10 @@ where } } else { match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(origin, blocks)) => - Some(ImportResult::BlockImport(origin, blocks)), + Ok(OnBlockData::Import(origin, blocks)) => { + self.import_blocks(origin, blocks); + None + }, Ok(OnBlockData::Request(peer, req)) => { self.send_block_request(peer, req); None @@ -2712,6 +2815,28 @@ where }, } } + + fn import_blocks(&mut self, origin: BlockOrigin, 
blocks: Vec>) { + if let Some(metrics) = &self.metrics { + metrics.import_queue_blocks_submitted.inc(); + } + + self.import_queue.import_blocks(origin, blocks); + } + + fn import_justifications( + &mut self, + peer: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { + if let Some(metrics) = &self.metrics { + metrics.import_queue_justifications_submitted.inc(); + } + + self.import_queue.import_justifications(peer, hash, number, justifications); + } } // This is purely during a backwards compatible transitionary period and should be removed @@ -3089,6 +3214,7 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (mut sync, _, _) = ChainSync::new( @@ -3100,7 +3226,9 @@ mod test { block_announce_validator, 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3151,6 +3279,7 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); @@ -3163,7 +3292,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3330,6 +3461,7 @@ mod test { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = 
NetworkServiceProvider::new(); @@ -3342,7 +3474,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3453,6 +3587,7 @@ mod test { }; let mut client = Arc::new(TestClientBuilder::new().build()); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let info = client.info(); @@ -3466,7 +3601,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3584,6 +3721,7 @@ mod test { fn can_sync_huge_fork() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3619,7 +3757,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3722,6 +3862,7 @@ mod test { fn syncs_fork_without_duplicate_requests() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3757,7 +3898,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3881,6 +4024,7 @@ mod test { #[test] fn removes_target_fork_on_disconnect() { 
sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); @@ -3895,7 +4039,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, @@ -3921,6 +4067,7 @@ mod test { #[test] fn can_import_response_with_missing_blocks() { sp_tracing::try_init_simple(); + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let mut client2 = Arc::new(TestClientBuilder::new().build()); @@ -3937,7 +4084,9 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs index 48d72c425bd03..143c02fa98323 100644 --- a/client/network/sync/src/mock.rs +++ b/client/network/sync/src/mock.rs @@ -25,7 +25,7 @@ use sc_consensus::{BlockImportError, BlockImportStatus}; use sc_network_common::sync::{ message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, - OpaqueBlockResponse, PeerInfo, PollBlockAnnounceValidation, PollResult, SyncStatus, + OpaqueBlockResponse, PeerInfo, PollBlockAnnounceValidation, SyncStatus, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -60,6 +60,7 @@ mockall::mock! 
{ request: Option>, response: BlockResponse, ) -> Result, BadPeer>; + fn process_block_response_data(&mut self, blocks_to_import: Result, BadPeer>); fn on_block_justification( &mut self, who: PeerId, @@ -89,7 +90,7 @@ mockall::mock! { &mut self, cx: &mut std::task::Context<'a>, ) -> Poll>; - fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn peer_disconnected(&mut self, who: &PeerId); fn metrics(&self) -> Metrics; fn block_response_into_blocks( &self, @@ -99,7 +100,7 @@ mockall::mock! { fn poll<'a>( &mut self, cx: &mut std::task::Context<'a>, - ) -> Poll>; + ) -> Poll>; fn send_block_request( &mut self, who: PeerId, diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index cf07c65ee3109..50ded5b643dea 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use libp2p::PeerId; +use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network_common::service::NetworkSyncForkRequest; use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -25,9 +26,18 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; #[derive(Debug)] pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), + RequestJustification(B::Hash, NumberFor), + ClearJustificationRequests, + BlocksProcessed( + usize, + usize, + Vec<(Result>, BlockImportError>, B::Hash)>, + ), + JustificationImported(PeerId, B::Hash, NumberFor, bool), } /// Handle for communicating with `ChainSync` asynchronously +#[derive(Clone)] pub struct ChainSyncInterfaceHandle { tx: TracingUnboundedSender>, } @@ -56,3 +66,46 @@ impl NetworkSyncForkRequest> .unbounded_send(ToServiceCommand::SetSyncForkRequest(peers, hash, number)); } } + +impl JustificationSyncLink for ChainSyncInterfaceHandle { + /// Request a justification for the given block from the 
network. + /// + /// On success, the justification will be passed to the import queue that was part at + /// initialization as part of the configuration. + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); + } + + fn clear_justification_requests(&self) { + let _ = self.tx.unbounded_send(ToServiceCommand::ClearJustificationRequests); + } +} + +impl Link for ChainSyncInterfaceHandle { + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::BlocksProcessed(imported, count, results)); + } + + fn justification_imported( + &mut self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::JustificationImported(who, *hash, number, success)); + } + + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); + } +} diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs index c8a29e1fba8ea..d8aad2fa7bac1 100644 --- a/client/network/sync/src/service/mock.rs +++ b/client/network/sync/src/service/mock.rs @@ -18,6 +18,7 @@ use futures::channel::oneshot; use libp2p::{Multiaddr, PeerId}; +use sc_consensus::{BlockImportError, BlockImportStatus}; use sc_network_common::{ config::MultiaddrWithPeerId, protocol::ProtocolName, @@ -29,13 +30,43 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; mockall::mock! 
{ - pub ChainSyncInterface {} + pub ChainSyncInterface { + pub fn justification_sync_link_request_justification(&self, hash: &B::Hash, number: NumberFor); + pub fn justification_sync_link_clear_justification_requests(&self); + } impl NetworkSyncForkRequest> for ChainSyncInterface { fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor); } + + impl sc_consensus::Link for ChainSyncInterface { + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ); + fn justification_imported( + &mut self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ); + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor); + } +} + +impl sc_consensus::JustificationSyncLink for MockChainSyncInterface { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + self.justification_sync_link_request_justification(hash, number); + } + + fn clear_justification_requests(&self) { + self.justification_sync_link_clear_justification_requests(); + } } mockall::mock! 
{ diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs index bd78c3b45226d..e6ed67dd9d0e8 100644 --- a/client/network/sync/src/tests.rs +++ b/client/network/sync/src/tests.rs @@ -37,6 +37,7 @@ use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _ // poll `ChainSync` and verify that a new sync fork request has been registered #[async_std::test] async fn delegate_to_chainsync() { + let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (mut chain_sync, chain_sync_service, _) = ChainSync::new( sc_network_common::sync::SyncMode::Full, @@ -47,7 +48,9 @@ async fn delegate_to_chainsync() { Box::new(DefaultBlockAnnounceValidator), 1u32, None, + None, chain_sync_network_handle, + import_queue, ProtocolName::from("block-request"), ProtocolName::from("state-request"), None, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 975d902157310..1b9385aef6195 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -44,8 +44,8 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, - Verifier, + ForkChoiceStrategy, ImportQueue, ImportResult, JustificationImport, JustificationSyncLink, + LongestChain, Verifier, }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, @@ -886,7 +886,9 @@ where block_announce_validator, network_config.max_parallel_downloads, Some(warp_sync), + None, chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), @@ -900,9 +902,8 @@ where chain: client.clone(), protocol_id, fork_id, - 
import_queue, chain_sync: Box::new(chain_sync), - chain_sync_service, + chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: None, block_announce_config, request_response_protocol_configs: [ @@ -921,6 +922,9 @@ where async_std::task::spawn(async move { chain_sync_network_provider.run(service).await; }); + async_std::task::spawn(async move { + import_queue.run(Box::new(chain_sync_service)).await; + }); self.mut_peers(move |peers| { for peer in peers.iter_mut() { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 63d60fb06f471..83c3d493e468b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -860,7 +860,9 @@ where block_announce_validator, config.network.max_parallel_downloads, warp_sync_provider, + config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), chain_sync_network_handle, + import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), @@ -884,9 +886,8 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), - chain_sync_service, + chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, request_response_protocol_configs: request_response_protocol_configs @@ -932,6 +933,7 @@ where Some("networking"), chain_sync_network_provider.run(network.clone()), ); + spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(chain_sync_service))); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index c0612124dd0c2..ca09c1658d72f 100644 --- 
a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -157,7 +157,7 @@ fn import_block_to_queue( let (header, extrinsics) = signed_block.block.deconstruct(); let hash = header.hash(); // import queue handles verification and importing it into the client. - queue.import_blocks( + queue.service_ref().import_blocks( BlockOrigin::File, vec![IncomingBlock:: { hash, From 1d1b955a1283691fe77175b5aa896eea7906b487 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 22 Nov 2022 14:42:52 +0200 Subject: [PATCH 02/30] Move stuff to SyncingEngine --- client/network/Cargo.toml | 1 + client/network/src/config.rs | 10 +- client/network/src/protocol.rs | 140 +++++--------------- client/network/src/service.rs | 3 +- client/network/src/service/tests/mod.rs | 10 +- client/network/sync/src/engine.rs | 163 ++++++++++++++++++++++++ client/network/sync/src/lib.rs | 1 + client/network/test/src/lib.rs | 9 +- client/service/src/builder.rs | 13 +- 9 files changed, 229 insertions(+), 121 deletions(-) create mode 100644 client/network/sync/src/engine.rs diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index afd9880148081..d468f3b2c98f5 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -47,6 +47,7 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "./common" } +sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-arithmetic = { version = "6.0.0", path = "../../primitives/arithmetic" } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index fb6d060344bdc..c8449386933d9 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ 
-30,6 +30,7 @@ pub use sc_network_common::{ sync::warp::WarpSyncProvider, ExHashT, }; +use sc_network_sync::engine::SyncingEngine; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; @@ -40,9 +41,8 @@ use libp2p::{ multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; -use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, - sync::ChainSync, +use sc_network_common::config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig, }; use sp_runtime::traits::Block as BlockT; use std::{ @@ -82,8 +82,8 @@ where /// name on the wire. pub fork_id: Option, - /// Instance of chain sync implementation. - pub chain_sync: Box>, + /// Syncing engine. + pub engine: SyncingEngine, /// Interface that can be used to delegate syncing-related function calls to `ChainSync` pub chain_sync_service: Box>, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 10eb31b595253..2e9c4a3788440 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -33,7 +33,6 @@ use log::{debug, error, log, trace, warn, Level}; use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; -use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; use sc_network_common::{ config::NonReservedPeerMode, @@ -41,10 +40,11 @@ use sc_network_common::{ protocol::{role::Roles, ProtocolName}, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake, BlockData, BlockResponse, BlockState}, - BadPeer, ChainSync, PollBlockAnnounceValidation, SyncStatus, + BadPeer, PollBlockAnnounceValidation, SyncStatus, }, utils::{interval, LruHashSet}, }; +use sc_network_sync::engine::SyncingEngine; use sp_arithmetic::traits::SaturatedConversion; use sp_runtime::{ generic::BlockId, @@ -101,43 +101,6 @@ mod rep { pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 
<< 12), "Bad block announcement"); } -struct Metrics { - peers: Gauge, - queued_blocks: Gauge, - fork_targets: Gauge, - justifications: GaugeVec, -} - -impl Metrics { - fn register(r: &Registry) -> Result { - Ok(Self { - peers: { - let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?; - register(g, r)? - }, - queued_blocks: { - let g = - Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?; - register(g, r)? - }, - fork_targets: { - let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?; - register(g, r)? - }, - justifications: { - let g = GaugeVec::new( - Opts::new( - "substrate_sync_extra_justifications", - "Number of extra justifications requests", - ), - &["status"], - )?; - register(g, r)? - }, - }) - } -} - // Lock must always be taken in order declared here. pub struct Protocol { /// Interval at which we call `tick`. @@ -147,9 +110,6 @@ pub struct Protocol { /// Assigned roles. roles: Roles, genesis_hash: B::Hash, - /// State machine that handles the list of in-progress requests. Only full node peers are - /// registered. - chain_sync: Box>, // All connected peers. Contains both full and light node peers. peers: HashMap>, chain: Arc, @@ -177,12 +137,12 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - /// Prometheus metrics. - metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, /// A cache for the data that was associated to a block announcement. 
block_announce_data_cache: LruCache>, + // TODO: remove eventually + engine: SyncingEngine, } /// Peer information @@ -214,9 +174,8 @@ where roles: Roles, chain: Arc, network_config: &config::NetworkConfiguration, - metrics_registry: Option<&Registry>, - chain_sync: Box>, block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, + engine: SyncingEngine, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); @@ -344,7 +303,6 @@ where peers: HashMap::new(), chain, genesis_hash: info.genesis_hash, - chain_sync, important_peers, default_peers_set_no_slot_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), @@ -360,11 +318,7 @@ where .chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone())) .collect(), bad_handshake_substreams: Default::default(), - metrics: if let Some(r) = metrics_registry { - Some(Metrics::register(r)?) - } else { - None - }, + engine, boot_node_ids, block_announce_data_cache, }; @@ -404,44 +358,44 @@ where /// Returns the number of peers we're connected to and that are being queried. pub fn num_active_peers(&self) -> usize { - self.chain_sync.num_active_peers() + self.engine.chain_sync.num_active_peers() } /// Current global sync state. pub fn sync_state(&self) -> SyncStatus { - self.chain_sync.status() + self.engine.chain_sync.status() } /// Target sync block number. pub fn best_seen_block(&self) -> Option> { - self.chain_sync.status().best_seen_block + self.engine.chain_sync.status().best_seen_block } /// Number of peers participating in syncing. pub fn num_sync_peers(&self) -> u32 { - self.chain_sync.status().num_peers + self.engine.chain_sync.status().num_peers } /// Number of blocks in the import queue. pub fn num_queued_blocks(&self) -> u32 { - self.chain_sync.status().queued_blocks + self.engine.chain_sync.status().queued_blocks } /// Number of downloaded blocks. 
pub fn num_downloaded_blocks(&self) -> usize { - self.chain_sync.num_downloaded_blocks() + self.engine.chain_sync.num_downloaded_blocks() } /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { - self.chain_sync.num_sync_requests() + self.engine.chain_sync.num_sync_requests() } /// Inform sync about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); - self.chain_sync.update_chain_info(&hash, number); + self.engine.chain_sync.update_chain_info(&hash, number); self.behaviour.set_notif_protocol_handshake( HARDCODED_PEERSETS_SYNC, @@ -451,7 +405,7 @@ where } fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.chain_sync.peer_info(who) { + if let Some(info) = self.engine.chain_sync.peer_info(who) { if let Some(ref mut peer) = self.peers.get_mut(who) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; @@ -475,7 +429,7 @@ where } if let Some(_peer_data) = self.peers.remove(&peer) { - self.chain_sync.peer_disconnected(&peer); + self.engine.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); Ok(()) } else { @@ -488,13 +442,6 @@ where self.peerset_handle.report_peer(who, reputation) } - /// Perform time based maintenance. - /// - /// > **Note**: This method normally doesn't have to be called except for testing purposes. - pub fn tick(&mut self) { - self.report_metrics() - } - /// Called on the first connection between two peers on the default set, after their exchange /// of handshake. 
/// @@ -563,7 +510,7 @@ where let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; if status.roles.is_full() && - self.chain_sync.num_peers() >= + self.engine.chain_sync.num_peers() >= self.default_peers_set_num_full + self.default_peers_set_no_slot_connected_peers.len() + this_peer_reserved_slot @@ -574,7 +521,8 @@ where } if status.roles.is_light() && - (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light + (self.peers.len() - self.engine.chain_sync.num_peers()) >= + self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. debug!(target: "sync", "Too many light nodes, rejecting {}", who); @@ -594,7 +542,7 @@ where }; let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + match self.engine.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { Ok(req) => req, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); @@ -616,7 +564,7 @@ where .push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number)); if let Some(req) = req { - self.chain_sync.send_block_request(who, req); + self.engine.chain_sync.send_block_request(who, req); } Ok(()) @@ -700,7 +648,9 @@ where }; if peer.info.roles.is_full() { - self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); + self.engine + .chain_sync + .push_block_announce_validation(who, hash, announce, is_best); } } @@ -757,7 +707,7 @@ where // to import header from announced block let's construct response to request that normally // would have been sent over network (but it is not in our case) - let blocks_to_import = self.chain_sync.on_block_data( + let blocks_to_import = self.engine.chain_sync.on_block_data( &who, None, BlockResponse:: { @@ -774,7 +724,7 @@ where }], }, ); - self.chain_sync.process_block_response_data(blocks_to_import); + 
self.engine.chain_sync.process_block_response_data(blocks_to_import); if is_best { self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); @@ -786,7 +736,7 @@ where /// Call this when a block has been finalized. The sync layer may have some additional /// requesting to perform. pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.chain_sync.on_block_finalized(&hash, *header.number()) + self.engine.chain_sync.on_block_finalized(&hash, *header.number()) } /// Set whether the syncing peers set is in reserved-only mode. @@ -887,35 +837,6 @@ where ); } } - - fn report_metrics(&self) { - if let Some(metrics) = &self.metrics { - let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); - metrics.peers.set(n); - - let m = self.chain_sync.metrics(); - - metrics.fork_targets.set(m.fork_targets.into()); - metrics.queued_blocks.set(m.queued_blocks.into()); - - metrics - .justifications - .with_label_values(&["pending"]) - .set(m.justifications.pending_requests.into()); - metrics - .justifications - .with_label_values(&["active"]) - .set(m.justifications.active_requests.into()); - metrics - .justifications - .with_label_values(&["failed"]) - .set(m.justifications.failed_requests.into()); - metrics - .justifications - .with_label_values(&["importing"]) - .set(m.justifications.importing_requests.into()); - } - } } /// Outcome of an incoming custom message. @@ -1028,7 +949,7 @@ where // // Process any received requests received from `NetworkService` and // check if there is any block announcement validation finished. 
- while let Poll::Ready(result) = self.chain_sync.poll(cx) { + while let Poll::Ready(result) = self.engine.chain_sync.poll(cx) { match self.process_block_announce_validation_result(result) { CustomMessageOutcome::None => {}, outcome => self.pending_messages.push_back(outcome), @@ -1036,7 +957,7 @@ where } while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { - self.tick(); + self.engine.report_metrics(); } if let Some(message) = self.pending_messages.pop_front() { @@ -1203,7 +1124,8 @@ where // Make sure that the newly added block announce validation future was // polled once to be registered in the task. - if let Poll::Ready(res) = self.chain_sync.poll_block_announce_validation(cx) + if let Poll::Ready(res) = + self.engine.chain_sync.poll_block_announce_validation(cx) { self.process_block_announce_validation_result(res) } else { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1b6d603503bf1..cb3b00732c09d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -224,9 +224,8 @@ where From::from(¶ms.role), params.chain.clone(), ¶ms.network_config, - params.metrics_registry.as_ref(), - params.chain_sync, params.block_announce_config, + params.engine, )?; // List of multiaddresses that we know in the network. 
diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 4b4cb21411b14..b7ed176bbdd16 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -36,6 +36,7 @@ use sc_network_sync::{ block_request_handler::BlockRequestHandler, service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, + engine::SyncingEngine, ChainSync, }; use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; @@ -306,6 +307,13 @@ impl TestNetworkBuilder { .link .unwrap_or(Box::new(sc_network_sync::service::mock::MockChainSyncInterface::new())); + let engine = SyncingEngine::new( + Roles::from(&config::Role::Full), + client.clone(), + chain_sync, + None, + ); + let worker = NetworkWorker::< substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::Hash, @@ -318,7 +326,7 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - chain_sync, + engine, chain_sync_service, metrics_registry: None, request_response_protocol_configs: [ diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs new file mode 100644 index 0000000000000..51049ac80532b --- /dev/null +++ b/client/network/sync/src/engine.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; + +use sc_network_common::{protocol::role::Roles, sync::ChainSync}; +use sp_runtime::traits::Block as BlockT; + +use std::sync::Arc; + +struct Metrics { + _peers: Gauge, + queued_blocks: Gauge, + fork_targets: Gauge, + justifications: GaugeVec, +} + +impl Metrics { + fn register(r: &Registry) -> Result { + Ok(Self { + _peers: { + let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?; + register(g, r)? + }, + queued_blocks: { + let g = + Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?; + register(g, r)? + }, + fork_targets: { + let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?; + register(g, r)? + }, + justifications: { + let g = GaugeVec::new( + Opts::new( + "substrate_sync_extra_justifications", + "Number of extra justifications requests", + ), + &["status"], + )?; + register(g, r)? + }, + }) + } +} + +pub struct SyncingEngine { + // /// Interval at which we call `tick`. + // tick_timeout: Pin + Send>>, + // /// Pending list of messages to return from `poll` as a priority. + // pending_messages: VecDeque>, + /// Assigned roles. + _roles: Roles, + /// State machine that handles the list of in-progress requests. Only full node peers are + /// registered. + pub chain_sync: Box>, + // /// All connected peers. Contains both full and light node peers. + // peers: HashMap>, + _client: Arc, + // /// List of nodes for which we perform additional logging because they are important for the + // /// user. + // important_peers: HashSet, + // /// List of nodes that should never occupy peer slots. + // default_peers_set_no_slot_peers: HashSet, + // /// Actual list of connected no-slot nodes. 
+ // default_peers_set_no_slot_connected_peers: HashSet, + // /// Value that was passed as part of the configuration. Used to cap the number of full + // nodes. default_peers_set_num_full: usize, + // /// Number of slots to allocate to light nodes. + // default_peers_set_num_light: usize, + // /// Used to report reputation changes. + // peerset_handle: sc_peerset::PeersetHandle, + // /// Handles opening the unique substream and sending and receiving raw messages. + // behaviour: Notifications, + // /// List of notifications protocols that have been registered. + // notification_protocols: Vec, + // /// If we receive a new "substream open" event that contains an invalid handshake, we ask + // the /// inner layer to force-close the substream. Force-closing the substream will generate + // a /// "substream closed" event. This is a problem: since we can't propagate the "substream + // open" /// event to the outer layers, we also shouldn't propagate this "substream closed" + // event. To /// solve this, an entry is added to this map whenever an invalid handshake is + // received. /// Entries are removed when the corresponding "substream closed" is later + // received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, + /// Prometheus metrics. + metrics: Option, + // /// The `PeerId`'s of all boot nodes. + // boot_node_ids: HashSet, + // /// A cache for the data that was associated to a block announcement. + // block_announce_data_cache: LruCache>, +} + +impl SyncingEngine { + pub fn new( + roles: Roles, + client: Arc, + chain_sync: Box>, + metrics_registry: Option<&Registry>, + ) -> Self { + Self { + _roles: roles, + _client: client, + chain_sync, + metrics: if let Some(r) = metrics_registry { + match Metrics::register(r) { + Ok(metrics) => Some(metrics), + Err(err) => { + log::error!(target: "sync", "Failed to register metrics {err:?}"); + None + }, + } + } else { + None + }, + } + } + + /// Report Prometheus metrics. 
+ pub fn report_metrics(&self) { + if let Some(metrics) = &self.metrics { + // TODO(aaro): fix + // let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); + // metrics.peers.set(n); + + let m = self.chain_sync.metrics(); + + metrics.fork_targets.set(m.fork_targets.into()); + metrics.queued_blocks.set(m.queued_blocks.into()); + + metrics + .justifications + .with_label_values(&["pending"]) + .set(m.justifications.pending_requests.into()); + metrics + .justifications + .with_label_values(&["active"]) + .set(m.justifications.active_requests.into()); + metrics + .justifications + .with_label_values(&["failed"]) + .set(m.justifications.failed_requests.into()); + metrics + .justifications + .with_label_values(&["importing"]) + .set(m.justifications.importing_requests.into()); + } + } +} diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index d91f7f2572aa6..90db96f9feec0 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -30,6 +30,7 @@ pub mod block_request_handler; pub mod blocks; +pub mod engine; pub mod mock; mod schema; pub mod service; diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 1b9385aef6195..ec1b4e2a230a7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -895,6 +895,13 @@ where ) .unwrap(); + let engine = sc_network_sync::engine::SyncingEngine::new( + Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), + client.clone(), + Box::new(chain_sync), + None, + ); + let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -902,7 +909,7 @@ where chain: client.clone(), protocol_id, fork_id, - chain_sync: Box::new(chain_sync), + engine, chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: None, block_announce_config, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs 
index 83c3d493e468b..9851632887b6a 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -46,8 +46,8 @@ use sc_network_common::{ }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, - state_request_handler::StateRequestHandler, + block_request_handler::BlockRequestHandler, engine::SyncingEngine, + service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler, warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, }; use sc_rpc::{ @@ -868,6 +868,13 @@ where warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), )?; + let engine = SyncingEngine::new( + Roles::from(&config.role), + client.clone(), + Box::new(chain_sync), + config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), + ); + request_response_protocol_configs.push(config.network.ipfs_server.then(|| { let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run()); @@ -886,7 +893,7 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - chain_sync: Box::new(chain_sync), + engine, chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, From bd8f6a28c87e0bee2f4dd2dcd640951dc58c8790 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 22 Nov 2022 15:23:00 +0200 Subject: [PATCH 03/30] Move `ChainSync` instanation to `SyncingEngine` Some of the tests have to be rewritten --- client/network/src/protocol.rs | 19 +++- client/network/src/service.rs | 35 +++++- .../network/src/service/tests/chain_sync.rs | 2 + client/network/src/service/tests/mod.rs | 97 +++++----------- 
client/network/src/service/tests/service.rs | 71 ++++++------ client/network/sync/src/engine.rs | 107 ++++++++++++++---- client/network/test/src/lib.rs | 59 +++++----- client/service/src/builder.rs | 17 +-- 8 files changed, 227 insertions(+), 180 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2e9c4a3788440..06873b5b0de09 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -33,7 +33,7 @@ use log::{debug, error, log, trace, warn, Level}; use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; -use sc_client_api::HeaderBackend; +use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_network_common::{ config::NonReservedPeerMode, error, @@ -46,6 +46,7 @@ use sc_network_common::{ }; use sc_network_sync::engine::SyncingEngine; use sp_arithmetic::traits::SaturatedConversion; +use sp_blockchain::HeaderMetadata; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, @@ -167,7 +168,13 @@ pub struct PeerInfo { impl Protocol where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Create a new instance. 
pub fn new( @@ -880,7 +887,13 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol where B: BlockT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { type ConnectionHandler = ::ConnectionHandler; type OutEvent = CustomMessageOutcome; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index cb3b00732c09d..afb5d6f3d0b11 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -54,6 +54,7 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::{MultiaddrWithPeerId, TransportConfig}, error::Error, @@ -73,7 +74,7 @@ use sc_network_common::{ }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::HeaderBackend; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ cmp, @@ -136,7 +137,13 @@ impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Creates the network service. /// @@ -1242,7 +1249,13 @@ pub struct NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. 
external_addresses: Arc>>, @@ -1274,7 +1287,13 @@ impl Future for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { type Output = (); @@ -1908,7 +1927,13 @@ impl Unpin for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, { } diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs index 7ddcc521fb8f0..52ec72dfc7ede 100644 --- a/client/network/src/service/tests/chain_sync.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/* use crate::{ config, service::tests::{TestNetworkBuilder, BLOCK_ANNOUNCE_PROTO_NAME}, @@ -443,3 +444,4 @@ async fn disconnect_peer_using_chain_sync_handle() { panic!("did not receive disconnection event in time"); } } +*/ diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index b7ed176bbdd16..ac7c08f9aa6e2 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -34,9 +34,9 @@ use sc_network_common::{ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, + engine::SyncingEngine, service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, - engine::SyncingEngine, ChainSync, }; use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; @@ -244,75 +244,36 @@ impl TestNetworkBuilder { protocol_config }; - let block_announce_config = NonDefaultSetConfig { - notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), - fallback_names: vec![], - 
max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< - substrate_test_runtime_client::runtime::Block, - >::build( - Roles::from(&config::Role::Full), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ))), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - }; - let (chain_sync_network_provider, chain_sync_network_handle) = self.chain_sync_network.unwrap_or(NetworkServiceProvider::new()); - let (chain_sync, chain_sync_service) = self.chain_sync.unwrap_or({ - let (chain_sync, chain_sync_service, _) = ChainSync::new( - match network_config.sync_mode { - config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode, - }, - config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, - client.clone(), - protocol_id.clone(), - &fork_id, - Roles::from(&config::Role::Full), - Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), - network_config.max_parallel_downloads, - None, - None, - chain_sync_network_handle, - import_queue.service(), - block_request_protocol_config.name.clone(), - state_request_protocol_config.name.clone(), - None, - ) - .unwrap(); - - if let None = self.link { - self.link = Some(Box::new(chain_sync_service.clone())); - } - (Box::new(chain_sync), Box::new(chain_sync_service)) - }); - let mut link = self - .link - .unwrap_or(Box::new(sc_network_sync::service::mock::MockChainSyncInterface::new())); - - let engine = SyncingEngine::new( - Roles::from(&config::Role::Full), - client.clone(), - chain_sync, - None, - ); + let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( + 
Roles::from(&config::Role::Full), + client.clone(), + None, + match network_config.sync_mode { + config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { + skip_proofs, + storage_chain_mode, + }, + config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + protocol_id.clone(), + None, + &fork_id, + Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), + network_config.max_parallel_downloads, + None, + chain_sync_network_handle, + import_queue.service(), + block_request_protocol_config.name.clone(), + state_request_protocol_config.name.clone(), + None, + ) + .unwrap(); + let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); let worker = NetworkWorker::< substrate_test_runtime_client::runtime::Block, @@ -327,7 +288,7 @@ impl TestNetworkBuilder { protocol_id, fork_id, engine, - chain_sync_service, + chain_sync_service: Box::new(chain_sync_service), metrics_registry: None, request_response_protocol_configs: [ block_request_protocol_config, diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 90945fdcef2cf..516e80bfc6390 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -401,41 +401,42 @@ fn fallback_name_working() { }); } -// Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement -// protocol name and verify that `SyncDisconnected` event is emitted -#[async_std::test] -async fn disconnect_sync_peer_using_block_announcement_protocol_name() { - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - - while !notif_received || !sync_received { - match 
stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. } => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut events_stream1).await; - wait_for_events(&mut events_stream2).await; - - // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted - node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); - assert!(std::matches!( - events_stream2.next().await, - Some(Event::NotificationStreamClosed { .. }) - )); - let _ = events_stream2.next().await; // ignore the reopen event - - // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is - // emitted - node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); - assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); -} +// TODO(aaro): fix this test, how though? +// // Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement +// // protocol name and verify that `SyncDisconnected` event is emitted +// #[async_std::test] +// async fn disconnect_sync_peer_using_block_announcement_protocol_name() { +// let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + +// async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { +// let mut notif_received = false; +// let mut sync_received = false; + +// while !notif_received || !sync_received { +// match stream.next().await.unwrap() { +// Event::NotificationStreamOpened { .. } => notif_received = true, +// Event::SyncConnected { .. 
} => sync_received = true, +// _ => {}, +// }; +// } +// } + +// wait_for_events(&mut events_stream1).await; +// wait_for_events(&mut events_stream2).await; + +// // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted +// node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); +// assert!(std::matches!( +// events_stream2.next().await, +// Some(Event::NotificationStreamClosed { .. }) +// )); +// let _ = events_stream2.next().await; // ignore the reopen event + +// // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is +// // emitted +// node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); +// assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); +// } #[test] #[should_panic(expected = "don't match the transport")] diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 51049ac80532b..47074c9702cf4 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -16,9 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::{service, ChainSync, ChainSyncInterfaceHandle, ClientError}; + use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; -use sc_network_common::{protocol::role::Roles, sync::ChainSync}; +use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; +use sc_consensus::import_queue::ImportQueueService; +use sc_network_common::{ + config::{NonDefaultSetConfig, ProtocolId}, + protocol::{role::Roles, ProtocolName}, + sync::{warp::WarpSyncProvider, ChainSync as ChainSyncT, SyncMode}, +}; +use sp_blockchain::HeaderMetadata; +use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::traits::Block as BlockT; use std::sync::Arc; @@ -60,19 +70,24 @@ impl Metrics { } } +// TODO(aaro): reorder these properly and remove stuff that is not needed pub struct SyncingEngine { + /// State machine that handles the list of in-progress requests. Only full node peers are + /// registered. + pub chain_sync: Box>, + + /// Blockchain client. + _client: Arc, + + /// Network service. + _network_service: service::network::NetworkServiceHandle, + // /// Interval at which we call `tick`. // tick_timeout: Pin + Send>>, - // /// Pending list of messages to return from `poll` as a priority. - // pending_messages: VecDeque>, /// Assigned roles. _roles: Roles, - /// State machine that handles the list of in-progress requests. Only full node peers are - /// registered. - pub chain_sync: Box>, // /// All connected peers. Contains both full and light node peers. // peers: HashMap>, - _client: Arc, // /// List of nodes for which we perform additional logging because they are important for the // /// user. 
// important_peers: HashSet, @@ -105,29 +120,71 @@ pub struct SyncingEngine { // block_announce_data_cache: LruCache>, } -impl SyncingEngine { +impl SyncingEngine +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ pub fn new( roles: Roles, client: Arc, - chain_sync: Box>, metrics_registry: Option<&Registry>, - ) -> Self { - Self { - _roles: roles, - _client: client, - chain_sync, - metrics: if let Some(r) = metrics_registry { - match Metrics::register(r) { - Ok(metrics) => Some(metrics), - Err(err) => { - log::error!(target: "sync", "Failed to register metrics {err:?}"); - None - }, - } - } else { - None + mode: SyncMode, + protocol_id: ProtocolId, + fork_id: &Option, + block_announce_validator: Box + Send>, + max_parallel_downloads: u32, + warp_sync_provider: Option>>, + network_service: service::network::NetworkServiceHandle, + import_queue: Box>, + block_request_protocol_name: ProtocolName, + state_request_protocol_name: ProtocolName, + warp_sync_protocol_name: Option, + ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { + let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( + mode, + client.clone(), + protocol_id, + fork_id, + roles, + block_announce_validator, + max_parallel_downloads, + warp_sync_provider, + metrics_registry, + network_service.clone(), + import_queue, + block_request_protocol_name, + state_request_protocol_name, + warp_sync_protocol_name, + )?; + + Ok(( + Self { + _roles: roles, + _client: client, + chain_sync: Box::new(chain_sync), + _network_service: network_service, + metrics: if let Some(r) = metrics_registry { + match Metrics::register(r) { + Ok(metrics) => Some(metrics), + Err(err) => { + log::error!(target: "sync", "Failed to register metrics {err:?}"); + None + }, + } + } else { + None + }, }, - } + chain_sync_service, + block_announce_config, + )) } /// Report Prometheus metrics. 
diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index ec1b4e2a230a7..99e46171eaa01 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -62,7 +62,7 @@ use sc_network_common::{ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, - state_request_handler::StateRequestHandler, warp_request_handler, ChainSync, + state_request_handler::StateRequestHandler, warp_request_handler, }; use sc_service::client::Client; use sp_blockchain::{ @@ -869,38 +869,33 @@ where .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( - match network_config.sync_mode { - SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode, - }, - SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, - client.clone(), - protocol_id.clone(), - &fork_id, - Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), - block_announce_validator, - network_config.max_parallel_downloads, - Some(warp_sync), - None, - chain_sync_network_handle, - import_queue.service(), - block_request_protocol_config.name.clone(), - state_request_protocol_config.name.clone(), - Some(warp_protocol_config.name.clone()), - ) - .unwrap(); - let engine = sc_network_sync::engine::SyncingEngine::new( - Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), - client.clone(), - Box::new(chain_sync), - None, - ); + let (engine, chain_sync_service, block_announce_config) = + sc_network_sync::engine::SyncingEngine::new( + Roles::from(if config.is_authority { &Role::Authority } 
else { &Role::Full }), + client.clone(), + None, + match network_config.sync_mode { + SyncMode::Full => sc_network_common::sync::SyncMode::Full, + SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { + skip_proofs, + storage_chain_mode, + }, + SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + protocol_id.clone(), + &fork_id, + block_announce_validator, + network_config.max_parallel_downloads, + Some(warp_sync), + chain_sync_network_handle, + import_queue.service(), + block_request_protocol_config.name.clone(), + state_request_protocol_config.name.clone(), + Some(warp_protocol_config.name.clone()), + ) + .unwrap(); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 9851632887b6a..d8ee9bf62f817 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -48,7 +48,7 @@ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, engine::SyncingEngine, service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler, - warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, + warp_request_handler::RequestHandler as WarpSyncRequestHandler, }; use sc_rpc::{ author::AuthorApiServer, @@ -846,21 +846,21 @@ where }; let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( + let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( + Roles::from(&config.role), + client.clone(), + config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, SyncMode::Fast { 
skip_proofs, storage_chain_mode } => sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, - client.clone(), protocol_id.clone(), &config.chain_spec.fork_id().map(ToOwned::to_owned), - Roles::from(&config.role), block_announce_validator, config.network.max_parallel_downloads, warp_sync_provider, - config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), chain_sync_network_handle, import_queue.service(), block_request_protocol_config.name.clone(), @@ -868,13 +868,6 @@ where warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), )?; - let engine = SyncingEngine::new( - Roles::from(&config.role), - client.clone(), - Box::new(chain_sync), - config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), - ); - request_response_protocol_configs.push(config.network.ipfs_server.then(|| { let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run()); From 95695934690b2d34de11835351ddffb6ac2198a2 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 22 Nov 2022 17:45:14 +0200 Subject: [PATCH 04/30] Move peer hashmap to `SyncingEngine` --- client/network/common/src/sync.rs | 13 ++ client/network/src/lib.rs | 3 +- client/network/src/protocol.rs | 216 +++--------------------- client/network/src/service.rs | 6 +- client/network/src/service/tests/mod.rs | 4 +- client/network/sync/src/engine.rs | 180 +++++++++++++++++++- client/network/test/src/lib.rs | 2 + client/service/src/builder.rs | 8 +- 8 files changed, 222 insertions(+), 210 deletions(-) diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index bb531c8aedd16..6b6f67087382f 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -22,6 +22,8 @@ pub mod message; pub mod metrics; pub mod warp; +use 
crate::protocol::role::Roles; + use libp2p::PeerId; use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}; use sc_consensus::{ @@ -44,6 +46,17 @@ pub struct PeerInfo { pub best_number: NumberFor, } +/// Info about a peer's known state (both full and light). +#[derive(Clone, Debug)] +pub struct ExtendedPeerInfo { + /// Roles + pub roles: Roles, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: NumberFor, +} + /// Reported sync state. #[derive(Clone, Eq, PartialEq, Debug)] pub enum SyncState { diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index f185458e0dace..550e651a13e28 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -257,7 +257,6 @@ pub mod network_state; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::PeerInfo; use sc_consensus::{JustificationSyncLink, Link}; pub use sc_network_common::{ protocol::{ @@ -273,7 +272,7 @@ pub use sc_network_common::{ }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - StateDownloadProgress, SyncState, + ExtendedPeerInfo, StateDownloadProgress, SyncState, }, }; pub use service::{ diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 06873b5b0de09..9d1246300f63d 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -30,7 +30,6 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, log, trace, warn, Level}; -use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; @@ -39,12 +38,12 @@ use sc_network_common::{ error, protocol::{role::Roles, ProtocolName}, sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockData, BlockResponse, BlockState}, - BadPeer, PollBlockAnnounceValidation, SyncStatus, + message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, + BadPeer, 
ExtendedPeerInfo, SyncStatus, }, utils::{interval, LruHashSet}, }; -use sc_network_sync::engine::SyncingEngine; +use sc_network_sync::engine::{Peer, SyncingEngine}; use sp_arithmetic::traits::SaturatedConversion; use sp_blockchain::HeaderMetadata; use sp_runtime::{ @@ -52,7 +51,7 @@ use sp_runtime::{ traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, }; use std::{ - collections::{HashMap, HashSet, VecDeque}, + collections::{HashSet, VecDeque}, io, iter, num::NonZeroUsize, pin::Pin, @@ -98,8 +97,6 @@ mod rep { pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer role does not match (e.g. light peer connecting to another light peer). pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); - /// Peer send us a block announcement that failed at validation. - pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); } // Lock must always be taken in order declared here. @@ -111,8 +108,6 @@ pub struct Protocol { /// Assigned roles. roles: Roles, genesis_hash: B::Hash, - // All connected peers. Contains both full and light node peers. - peers: HashMap>, chain: Arc, /// List of nodes for which we perform additional logging because they are important for the /// user. @@ -140,31 +135,10 @@ pub struct Protocol { bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, - /// A cache for the data that was associated to a block announcement. - block_announce_data_cache: LruCache>, // TODO: remove eventually engine: SyncingEngine, } -/// Peer information -#[derive(Debug)] -struct Peer { - info: PeerInfo, - /// Holds a set of blocks known to this peer. - known_blocks: LruHashSet, -} - -/// Info about a peer's known state. 
-#[derive(Clone, Debug)] -pub struct PeerInfo { - /// Roles - pub roles: Roles, - /// Peer best block hash - pub best_hash: B::Hash, - /// Peer best block number - pub best_number: ::Number, -} - impl Protocol where B: BlockT, @@ -295,19 +269,10 @@ where ) }; - let cache_capacity = NonZeroUsize::new( - (network_config.default_peers_set.in_peers as usize + - network_config.default_peers_set.out_peers as usize) - .max(1), - ) - .expect("cache capacity is not zero"); - let block_announce_data_cache = LruCache::new(cache_capacity); - let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), pending_messages: VecDeque::new(), roles, - peers: HashMap::new(), chain, genesis_hash: info.genesis_hash, important_peers, @@ -327,7 +292,6 @@ where bad_handshake_substreams: Default::default(), engine, boot_node_ids, - block_announce_data_cache, }; Ok((protocol, peerset_handle, known_addresses)) @@ -360,7 +324,7 @@ where /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.peers.len() + self.engine.peers.len() } /// Returns the number of peers we're connected to and that are being queried. @@ -411,18 +375,9 @@ where ); } - fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.engine.chain_sync.peer_info(who) { - if let Some(ref mut peer) = self.peers.get_mut(who) { - peer.info.best_hash = info.best_hash; - peer.info.best_number = info.best_number; - } - } - } - /// Returns information about all the peers we are connected to after the handshake message. - pub fn peers_info(&self) -> impl Iterator)> { - self.peers.iter().map(|(id, peer)| (id, &peer.info)) + pub fn peers_info(&self) -> impl Iterator)> { + self.engine.peers.iter().map(|(id, peer)| (id, &peer.info)) } /// Called by peer when it is disconnecting. 
@@ -435,7 +390,7 @@ where debug!(target: "sync", "{} disconnected", peer); } - if let Some(_peer_data) = self.peers.remove(&peer) { + if let Some(_peer_data) = self.engine.peers.remove(&peer) { self.engine.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); Ok(()) @@ -461,7 +416,7 @@ where ) -> Result<(), ()> { trace!(target: "sync", "New peer {} {:?}", who, status); - if self.peers.contains_key(&who) { + if self.engine.peers.contains_key(&who) { error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); debug_assert!(false); return Err(()) @@ -528,7 +483,7 @@ where } if status.roles.is_light() && - (self.peers.len() - self.engine.chain_sync.num_peers()) >= + (self.engine.peers.len() - self.engine.chain_sync.num_peers()) >= self.default_peers_set_num_light { // Make sure that not all slots are occupied by light clients. @@ -538,7 +493,7 @@ where } let peer = Peer { - info: PeerInfo { + info: ExtendedPeerInfo { roles: status.roles, best_hash: status.best_hash, best_number: status.best_number, @@ -563,7 +518,7 @@ where debug!(target: "sync", "Connected {}", who); - self.peers.insert(who, peer); + self.engine.peers.insert(who, peer); if no_slot_peer { self.default_peers_set_no_slot_connected_peers.insert(who); } @@ -603,10 +558,10 @@ where debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); let data = data - .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) + .or_else(|| self.engine.block_announce_data_cache.get(&hash).cloned()) .unwrap_or_default(); - for (who, ref mut peer) in self.peers.iter_mut() { + for (who, ref mut peer) in self.engine.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); if inserted { trace!(target: "sync", "Announcing block {:?} to {}", hash, who); @@ -622,124 +577,7 @@ where } } - /// Push a block announce validation. 
- /// - /// It is required that [`ChainSync::poll_block_announce_validation`] is - /// called later to check for finished validations. The result of the validation - /// needs to be passed to [`Protocol::process_block_announce_validation_result`] - /// to finish the processing. - /// - /// # Note - /// - /// This will internally create a future, but this future will not be registered - /// in the task before being polled once. So, it is required to call - /// [`ChainSync::poll_block_announce_validation`] to ensure that the future is - /// registered properly and will wake up the task when being ready. - fn push_block_announce_validation(&mut self, who: PeerId, announce: BlockAnnounce) { - let hash = announce.header.hash(); - - let peer = match self.peers.get_mut(&who) { - Some(p) => p, - None => { - log::error!(target: "sync", "Received block announce from disconnected peer {}", who); - debug_assert!(false); - return - }, - }; - - peer.known_blocks.insert(hash); - - let is_best = match announce.state.unwrap_or(BlockState::Best) { - BlockState::Best => true, - BlockState::Normal => false, - }; - - if peer.info.roles.is_full() { - self.engine - .chain_sync - .push_block_announce_validation(who, hash, announce, is_best); - } - } - - /// Process the result of the block announce validation. - fn process_block_announce_validation_result( - &mut self, - validation_result: PollBlockAnnounceValidation, - ) -> CustomMessageOutcome { - let (header, is_best, who) = match validation_result { - PollBlockAnnounceValidation::Skip => return CustomMessageOutcome::None, - PollBlockAnnounceValidation::Nothing { is_best, who, announce } => { - self.update_peer_info(&who); - - if let Some(data) = announce.data { - if !data.is_empty() { - self.block_announce_data_cache.put(announce.header.hash(), data); - } - } - - // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` - // when we have all data required to import the block - // in the BlockAnnounce message. 
This is only when: - // 1) we're on light client; - // AND - // 2) parent block is already imported and not pruned. - if is_best { - return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()) - } else { - return CustomMessageOutcome::None - } - }, - PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { - self.update_peer_info(&who); - - if let Some(data) = announce.data { - if !data.is_empty() { - self.block_announce_data_cache.put(announce.header.hash(), data); - } - } - - (announce.header, is_best, who) - }, - PollBlockAnnounceValidation::Failure { who, disconnect } => { - if disconnect { - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - } - - self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); - return CustomMessageOutcome::None - }, - }; - - let number = *header.number(); - - // to import header from announced block let's construct response to request that normally - // would have been sent over network (but it is not in our case) - let blocks_to_import = self.engine.chain_sync.on_block_data( - &who, - None, - BlockResponse:: { - id: 0, - blocks: vec![BlockData:: { - hash: header.hash(), - header: Some(header), - body: None, - indexed_body: None, - receipt: None, - message_queue: None, - justification: None, - justifications: None, - }], - }, - ); - self.engine.chain_sync.process_block_response_data(blocks_to_import); - - if is_best { - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); - } - - CustomMessageOutcome::None - } - + // TODO: implement block-finalized notification for `ChainSyncInterface` /// Call this when a block has been finalized. The sync layer may have some additional /// requesting to perform.
pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { @@ -958,16 +796,8 @@ where return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } - // Advance the state of `ChainSync` - // - // Process any received requests received from `NetworkService` and - // check if there is any block announcement validation finished. - while let Poll::Ready(result) = self.engine.chain_sync.poll(cx) { - match self.process_block_announce_validation_result(result) { - CustomMessageOutcome::None => {}, - outcome => self.pending_messages.push_back(outcome), - } - } + // poll syncing engine + self.engine.poll(cx); while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { self.engine.report_metrics(); @@ -1061,7 +891,7 @@ where } else { match ( Roles::decode_all(&mut &received_handshake[..]), - self.peers.get(&peer_id), + self.engine.peers.get(&peer_id), ) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, @@ -1131,19 +961,19 @@ where } }, NotificationsOut::Notification { peer_id, set_id, message } => match set_id { - HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { + HARDCODED_PEERSETS_SYNC if self.engine.peers.contains_key(&peer_id) => { if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.push_block_announce_validation(peer_id, announce); + self.engine.push_block_announce_validation(peer_id, announce); // Make sure that the newly added block announce validation future was // polled once to be registered in the task. 
if let Poll::Ready(res) = self.engine.chain_sync.poll_block_announce_validation(cx) { - self.process_block_announce_validation_result(res) - } else { - CustomMessageOutcome::None + self.engine.process_block_announce_validation_result(res) } + + CustomMessageOutcome::None } else { warn!(target: "sub-libp2p", "Failed to decode block announce"); CustomMessageOutcome::None diff --git a/client/network/src/service.rs b/client/network/src/service.rs index afb5d6f3d0b11..83260133ee300 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -34,7 +34,7 @@ use crate::{ network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready}, + protocol::{self, NotificationsSink, NotifsHandlerError, Protocol, Ready}, transport, ChainSyncInterface, ReputationChange, }; @@ -69,7 +69,7 @@ use sc_network_common::{ NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, - sync::SyncStatus, + sync::{ExtendedPeerInfo, SyncStatus}, ExHashT, }; use sc_peerset::PeersetHandle; @@ -661,7 +661,7 @@ where } /// Get currently connected peers. 
- pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { + pub fn peers_debug_info(&mut self) -> Vec<(PeerId, ExtendedPeerInfo)> { self.network_service .behaviour_mut() .user_protocol_mut() diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index ac7c08f9aa6e2..f350128ad3bb0 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -261,8 +261,7 @@ impl TestNetworkBuilder { config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, }, protocol_id.clone(), - None, - &fork_id, + &None, Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), network_config.max_parallel_downloads, None, @@ -271,6 +270,7 @@ impl TestNetworkBuilder { block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), None, + std::num::NonZeroUsize::new(16).unwrap(), ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 47074c9702cf4..bf6c46fd887bf 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -18,6 +18,8 @@ use crate::{service, ChainSync, ChainSyncInterfaceHandle, ClientError}; +use libp2p::PeerId; +use lru::LruCache; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; @@ -25,13 +27,35 @@ use sc_consensus::import_queue::ImportQueueService; use sc_network_common::{ config::{NonDefaultSetConfig, ProtocolId}, protocol::{role::Roles, ProtocolName}, - sync::{warp::WarpSyncProvider, ChainSync as ChainSyncT, SyncMode}, + sync::{ + message::{ + generic::{BlockData, BlockResponse}, + BlockAnnounce, BlockState, + }, + warp::WarpSyncProvider, + ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncMode, + }, + utils::LruHashSet, }; use sp_blockchain::HeaderMetadata; use 
sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, Header}; -use std::sync::Arc; +use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, task::Poll}; + +mod rep { + use sc_peerset::ReputationChange as Rep; + // /// Reputation change when we are a light client and a peer is behind us. + // pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); + // /// We received a message that failed to decode. + // pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + // /// Peer has different genesis. + // pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); + // /// Peer role does not match (e.g. light peer connecting to another light peer). + // pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); + /// Peer sent us a block announcement that failed at validation. + pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); +} struct Metrics { _peers: Gauge, @@ -70,6 +94,14 @@ impl Metrics { } } +/// Peer information +#[derive(Debug)] +pub struct Peer { + pub info: ExtendedPeerInfo, + /// Holds a set of blocks known to this peer. + pub known_blocks: LruHashSet, +} + // TODO(aaro): reorder these properly and remove stuff that is not needed pub struct SyncingEngine { /// State machine that handles the list of in-progress requests. Only full node peers are @@ -80,14 +112,20 @@ pub struct SyncingEngine { _client: Arc, /// Network service. - _network_service: service::network::NetworkServiceHandle, + network_service: service::network::NetworkServiceHandle, // /// Interval at which we call `tick`. // tick_timeout: Pin + Send>>, /// Assigned roles. _roles: Roles, - // /// All connected peers. Contains both full and light node peers. - // peers: HashMap>, + /// All connected peers. Contains both full and light node peers.
+ pub peers: HashMap>, + + /// A cache for the data that was associated to a block announcement. + pub block_announce_data_cache: LruCache>, + + /// Protocol name used for block announcements + block_announce_protocol_name: ProtocolName, // /// List of nodes for which we perform additional logging because they are important for the // /// user. // important_peers: HashSet, @@ -116,8 +154,6 @@ pub struct SyncingEngine { metrics: Option, // /// The `PeerId`'s of all boot nodes. // boot_node_ids: HashSet, - // /// A cache for the data that was associated to a block announcement. - // block_announce_data_cache: LruCache>, } impl SyncingEngine @@ -131,6 +167,7 @@ where + Sync + 'static, { + // TODO(aaro): clean up these parameters pub fn new( roles: Roles, client: Arc, @@ -146,6 +183,7 @@ where block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, + cache_capacity: NonZeroUsize, ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( mode, @@ -164,12 +202,16 @@ where warp_sync_protocol_name, )?; + let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); Ok(( Self { _roles: roles, _client: client, chain_sync: Box::new(chain_sync), - _network_service: network_service, + network_service, + peers: HashMap::new(), + block_announce_data_cache: LruCache::new(cache_capacity), + block_announce_protocol_name, metrics: if let Some(r) = metrics_registry { match Metrics::register(r) { Ok(metrics) => Some(metrics), @@ -217,4 +259,124 @@ where .set(m.justifications.importing_requests.into()); } } + + fn update_peer_info(&mut self, who: &PeerId) { + if let Some(info) = self.chain_sync.peer_info(who) { + if let Some(ref mut peer) = self.peers.get_mut(who) { + peer.info.best_hash = info.best_hash; + peer.info.best_number = info.best_number; + } + } + } + + // TODO: emit peernewbest event? 
+ /// Process the result of the block announce validation. + pub fn process_block_announce_validation_result( + &mut self, + validation_result: PollBlockAnnounceValidation, + ) { + let (header, _is_best, who) = match validation_result { + PollBlockAnnounceValidation::Skip => return, + PollBlockAnnounceValidation::Nothing { is_best: _, who, announce } => { + self.update_peer_info(&who); + + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + + return + }, + PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { + self.update_peer_info(&who); + + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + + (announce.header, is_best, who) + }, + PollBlockAnnounceValidation::Failure { who, disconnect } => { + if disconnect { + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + } + + self.network_service.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); + return + }, + }; + + // to import header from announced block let's construct response to request that normally + // would have been sent over network (but it is not in our case) + let blocks_to_import = self.chain_sync.on_block_data( + &who, + None, + BlockResponse { + id: 0, + blocks: vec![BlockData { + hash: header.hash(), + header: Some(header), + body: None, + indexed_body: None, + receipt: None, + message_queue: None, + justification: None, + justifications: None, + }], + }, + ); + + self.chain_sync.process_block_response_data(blocks_to_import); + } + + /// Push a block announce validation. + /// + /// It is required that [`ChainSync::poll_block_announce_validation`] is + /// called later to check for finished validations. The result of the validation + /// needs to be passed to [`SyncingEngine::process_block_announce_validation_result`] + /// to finish the processing.
+ /// + /// # Note + /// + /// This will internally create a future, but this future will not be registered + /// in the task before being polled once. So, it is required to call + /// [`ChainSync::poll_block_announce_validation`] to ensure that the future is + /// registered properly and will wake up the task when being ready. + pub fn push_block_announce_validation( + &mut self, + who: PeerId, + announce: BlockAnnounce, + ) { + let hash = announce.header.hash(); + + let peer = match self.peers.get_mut(&who) { + Some(p) => p, + None => { + log::error!(target: "sync", "Received block announce from disconnected peer {}", who); + debug_assert!(false); + return + }, + }; + + peer.known_blocks.insert(hash); + + let is_best = match announce.state.unwrap_or(BlockState::Best) { + BlockState::Best => true, + BlockState::Normal => false, + }; + + if peer.info.roles.is_full() { + self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); + } + } + + pub fn poll(&mut self, cx: &mut std::task::Context) { + while let Poll::Ready(result) = self.chain_sync.poll(cx) { + self.process_block_announce_validation_result(result); + } + } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 99e46171eaa01..eef9378224105 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -25,6 +25,7 @@ mod sync; use std::{ collections::HashMap, marker::PhantomData, + num::NonZeroUsize, pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, @@ -894,6 +895,7 @@ where block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), + NonZeroUsize::new(16).unwrap(), ) .unwrap(); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index d8ee9bf62f817..49ece6129c658 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -74,7 +74,7 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}, 
BuildStorage, }; -use std::{str::FromStr, sync::Arc, time::SystemTime}; +use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. pub type TFullClient = @@ -866,6 +866,12 @@ where block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), + NonZeroUsize::new( + (config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize) + .max(1), + ) + .expect("cache capacity is not zero"), )?; request_response_protocol_configs.push(config.network.ipfs_server.then(|| { From befeac386e546a33bfccf79c28cf2088ea7b173d Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Wed, 23 Nov 2022 08:25:10 +0200 Subject: [PATCH 05/30] Let `SyncingEngine` to implement `ChainSyncInterface` --- .../src/communication/tests.rs | 4 + client/network-gossip/src/bridge.rs | 4 + client/network-gossip/src/state_machine.rs | 4 + client/network/common/src/service.rs | 7 + client/network/src/lib.rs | 8 +- client/network/src/protocol.rs | 99 +++++------- .../src/protocol/notifications/behaviour.rs | 42 ----- client/network/src/service.rs | 32 ++-- client/network/sync/src/engine.rs | 146 ++++++++++++++++-- client/network/sync/src/lib.rs | 61 ++------ client/network/sync/src/service/chain_sync.rs | 14 +- client/network/sync/src/service/mock.rs | 15 +- client/network/sync/src/service/network.rs | 30 +++- client/network/sync/src/tests.rs | 6 +- client/network/test/src/lib.rs | 9 +- client/network/test/src/sync.rs | 38 ++--- 16 files changed, 306 insertions(+), 213 deletions(-) diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index eab7bb2df50cf..5d66e81f6b56c 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -153,6 +153,10 @@ impl NetworkNotification for TestNetwork { ) -> Result, 
NotificationSenderError> { unimplemented!(); } + + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { + unimplemented!(); + } } impl NetworkBlock> for TestNetwork { diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 5563b3be35e8d..c716f39c61d4f 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -434,6 +434,10 @@ mod tests { ) -> Result, NotificationSenderError> { unimplemented!(); } + + fn set_notification_handshake(&self, _protocol: ProtocolName, handshake: Vec) { + unimplemented!(); + } } impl NetworkBlock<::Hash, NumberFor> for TestNetwork { diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 001f2c6136a00..817e4326a49c4 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -675,6 +675,10 @@ mod tests { ) -> Result, NotificationSenderError> { unimplemented!(); } + + fn set_notification_handshake(&self, _protocol: ProtocolName, handshake: Vec) { + unimplemented!(); + } } impl NetworkBlock<::Hash, NumberFor> for NoOpNetwork { diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 54d254eac384f..abc55a54c2f98 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -504,6 +504,9 @@ pub trait NetworkNotification { target: PeerId, protocol: ProtocolName, ) -> Result, NotificationSenderError>; + + /// Set handshake for the notification protocol. + fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec); } impl NetworkNotification for Arc @@ -522,6 +525,10 @@ where ) -> Result, NotificationSenderError> { T::notification_sender(self, target, protocol) } + + fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec) { + T::set_notification_handshake(self, protocol, handshake) + } } /// Provides ability to send network requests. 
diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 550e651a13e28..b2468b174848f 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -297,7 +297,12 @@ const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; /// Abstraction over syncing-related services pub trait ChainSyncInterface: - NetworkSyncForkRequest> + JustificationSyncLink + Link + Send + Sync + NetworkSyncForkRequest> + + JustificationSyncLink + + Link + + NetworkBlock> + + Send + + Sync { } @@ -305,6 +310,7 @@ impl ChainSyncInterface for T where T: NetworkSyncForkRequest> + JustificationSyncLink + Link + + NetworkBlock> + Send + Sync { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9d1246300f63d..caee998a0b134 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -38,7 +38,7 @@ use sc_network_common::{ error, protocol::{role::Roles, ProtocolName}, sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, + message::{BlockAnnounce, BlockAnnouncesHandshake}, BadPeer, ExtendedPeerInfo, SyncStatus, }, utils::{interval, LruHashSet}, @@ -46,10 +46,7 @@ use sc_network_common::{ use sc_network_sync::engine::{Peer, SyncingEngine}; use sp_arithmetic::traits::SaturatedConversion; use sp_blockchain::HeaderMetadata; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, -}; +use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{HashSet, VecDeque}, io, iter, @@ -322,6 +319,7 @@ where self.behaviour.peerset_debug_info() } + // TODO(aaro): implement using behaviour? /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { self.engine.peers.len() @@ -362,19 +360,7 @@ where self.engine.chain_sync.num_sync_requests() } - /// Inform sync about new best imported block. 
- pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); - - self.engine.chain_sync.update_chain_info(&hash, number); - - self.behaviour.set_notif_protocol_handshake( - HARDCODED_PEERSETS_SYNC, - BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) - .encode(), - ); - } - + // TODO(aaro): implement using ChainSyncInterface /// Returns information about all the peers we are connected to after the handshake message. pub fn peers_info(&self) -> impl Iterator)> { self.engine.peers.iter().map(|(id, peer)| (id, &peer.info)) @@ -532,48 +518,17 @@ where Ok(()) } - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. - pub fn announce_block(&mut self, hash: B::Hash, data: Option>) { - let header = match self.chain.header(BlockId::Hash(hash)) { - Ok(Some(header)) => header, - Ok(None) => { - warn!("Trying to announce unknown block: {}", hash); - return - }, - Err(e) => { - warn!("Error reading block header {}: {}", hash, e); - return - }, - }; - - // don't announce genesis block since it will be ignored - if header.number().is_zero() { - return - } - - let is_best = self.chain.info().best_hash == hash; - debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); - - let data = data - .or_else(|| self.engine.block_announce_data_cache.get(&hash).cloned()) - .unwrap_or_default(); - - for (who, ref mut peer) in self.engine.peers.iter_mut() { - let inserted = peer.known_blocks.insert(hash); - if inserted { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); - let message = BlockAnnounce { - header: header.clone(), - state: if is_best { Some(BlockState::Best) } else { Some(BlockState::Normal) }, - data: Some(data.clone()), - }; - - self.behaviour - .write_notification(who, HARDCODED_PEERSETS_SYNC, 
message.encode()); - } + /// Set handshake for the notification protocol. + pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.behaviour + .set_notif_protocol_handshake(sc_peerset::SetId::from(index), handshake); + } else { + error!( + target: "sub-libp2p", + "set_notification_handshake with unknown protocol: {}", + protocol + ); } } @@ -846,7 +801,18 @@ where genesis_hash: handshake.genesis_hash, }; + let roles = handshake.roles; if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + self.pending_messages.push_back( + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)] + .clone(), + negotiated_fallback, + roles, + notifications_sink, + }, + ); CustomMessageOutcome::SyncConnected(peer_id) } else { CustomMessageOutcome::None @@ -866,8 +832,21 @@ where match as DecodeAll>::decode_all( &mut &received_handshake[..], ) { + // TODO: korjaa tämä toimimaan Ok(handshake) => { + let roles = handshake.roles; if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + self.pending_messages.push_back( + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id)] + .clone(), + negotiated_fallback, + roles, + notifications_sink, + }, + ); CustomMessageOutcome::SyncConnected(peer_id) } else { CustomMessageOutcome::None diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 04f6fe445ac63..bf8e93385dd84 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -554,48 +554,6 @@ impl Notifications { self.peerset.reserved_peers(set_id) } - /// Sends a notification to a peer. 
- /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even if we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - /// - /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't - /// support our protocol. One needs to pass the equivalent of what would have been passed - /// with `send_packet`. - pub fn write_notification( - &mut self, - target: &PeerId, - set_id: sc_peerset::SetId, - message: impl Into>, - ) { - let notifs_sink = match self.peers.get(&(*target, set_id)).and_then(|p| p.get_open()) { - None => { - trace!( - target: "sub-libp2p", - "Tried to sent notification to {:?} without an open channel.", - target, - ); - return - }, - Some(sink) => sink, - }; - - let message = message.into(); - - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, - set_id, - message.len(), - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - - notifs_sink.send_sync_notification(message); - } - /// Returns the state of the peerset manager, for debugging purposes. pub fn peerset_debug_info(&mut self) -> serde_json::Value { self.peerset.debug_info() diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 83260133ee300..b518f2454b1b3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -549,14 +549,6 @@ where .on_block_finalized(hash, &header); } - /// Inform the network service about new best imported block. - pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - self.network_service - .behaviour_mut() - .user_protocol_mut() - .new_best_block_imported(hash, number); - } - /// Returns the local `PeerId`. 
pub fn local_peer_id(&self) -> &PeerId { Swarm::>::local_peer_id(&self.network_service) @@ -1077,6 +1069,12 @@ where Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) } + + fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake)); + } } #[async_trait::async_trait] @@ -1129,13 +1127,11 @@ where H: ExHashT, { fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + let _ = self.chain_sync_service.announce_block(hash, data); } fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); + let _ = self.chain_sync_service.new_best_block_imported(hash, number); } } @@ -1210,7 +1206,6 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// /// Each entry corresponds to a method of `NetworkService`. enum ServiceToWorkerMsg { - AnnounceBlock(B::Hash, Option>), GetValue(KademliaKey), PutValue(KademliaKey, Vec), AddKnownAddress(PeerId, Multiaddr), @@ -1238,7 +1233,7 @@ enum ServiceToWorkerMsg { pending_response: oneshot::Sender>, }, DisconnectPeer(PeerId, ProtocolName), - NewBestBlockImported(B::Hash, NumberFor), + SetNotificationHandshake(ProtocolName, Vec), } /// Main network worker. Must be polled in order for the network to advance. 
@@ -1323,11 +1318,6 @@ where Poll::Pending => break, }; match msg { - ServiceToWorkerMsg::AnnounceBlock(hash, data) => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .announce_block(hash, data), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1406,11 +1396,11 @@ where .behaviour_mut() .user_protocol_mut() .disconnect_peer(&who, protocol_name), - ServiceToWorkerMsg::NewBestBlockImported(hash, number) => this + ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake) => this .network_service .behaviour_mut() .user_protocol_mut() - .new_best_block_imported(hash, number), + .set_notification_handshake(protocol, handshake), } } diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index bf6c46fd887bf..f7d7aa70c668d 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,12 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{service, ChainSync, ChainSyncInterfaceHandle, ClientError}; +use crate::{ + service::{self, chain_sync::ToServiceCommand}, + ChainSync, ChainSyncInterfaceHandle, ClientError, +}; +use futures::StreamExt; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; +use codec::Encode; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network_common::{ @@ -30,16 +35,20 @@ use sc_network_common::{ sync::{ message::{ generic::{BlockData, BlockResponse}, - BlockAnnounce, BlockState, + BlockAnnounce, BlockAnnouncesHandshake, BlockState, }, warp::WarpSyncProvider, - ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncMode, + BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncMode, }, utils::LruHashSet, }; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_blockchain::HeaderMetadata; use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor, Zero}, +}; use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, task::Poll}; @@ -109,15 +118,22 @@ pub struct SyncingEngine { pub chain_sync: Box>, /// Blockchain client. - _client: Arc, + client: Arc, /// Network service. network_service: service::network::NetworkServiceHandle, + /// Channel for receiving service commands + service_rx: TracingUnboundedReceiver>, + // /// Interval at which we call `tick`. // tick_timeout: Pin + Send>>, /// Assigned roles. - _roles: Roles, + roles: Roles, + + /// Genesis hash. + genesis_hash: B::Hash, + /// All connected peers. Contains both full and light node peers. 
pub peers: HashMap>, @@ -185,7 +201,7 @@ where warp_sync_protocol_name: Option, cache_capacity: NonZeroUsize, ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { - let (chain_sync, chain_sync_service, block_announce_config) = ChainSync::new( + let (chain_sync, block_announce_config) = ChainSync::new( mode, client.clone(), protocol_id, @@ -203,15 +219,24 @@ where )?; let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); + let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); + let genesis_hash = client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"); + Ok(( Self { - _roles: roles, - _client: client, + roles, + client, chain_sync: Box::new(chain_sync), network_service, peers: HashMap::new(), block_announce_data_cache: LruCache::new(cache_capacity), block_announce_protocol_name, + service_rx, + genesis_hash, metrics: if let Some(r) = metrics_registry { match Metrics::register(r) { Ok(metrics) => Some(metrics), @@ -224,7 +249,7 @@ where None }, }, - chain_sync_service, + ChainSyncInterfaceHandle::new(tx), block_announce_config, )) } @@ -374,7 +399,106 @@ where } } + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. 
+ pub fn announce_block(&mut self, hash: B::Hash, data: Option>) { + let header = match self.client.header(BlockId::Hash(hash)) { + Ok(Some(header)) => header, + Ok(None) => { + log::warn!(target: "sync", "Trying to announce unknown block: {}", hash); + return + }, + Err(e) => { + log::warn!(target: "sync", "Error reading block header {}: {}", hash, e); + return + }, + }; + + // don't announce genesis block since it will be ignored + if header.number().is_zero() { + return + } + + let is_best = self.client.info().best_hash == hash; + log::debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); + + let data = data + .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) + .unwrap_or_default(); + + for (who, ref mut peer) in self.peers.iter_mut() { + let inserted = peer.known_blocks.insert(hash); + if inserted { + log::trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + let message = BlockAnnounce { + header: header.clone(), + state: if is_best { Some(BlockState::Best) } else { Some(BlockState::Normal) }, + data: Some(data.clone()), + }; + + self.network_service.write_notification( + *who, + self.block_announce_protocol_name.clone(), + message.encode(), + ); + } + } + } + + /// Inform sync about new best imported block. 
+ pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); + + self.chain_sync.update_chain_info(&hash, number); + self.network_service.set_notification_handshake( + self.block_announce_protocol_name.clone(), + BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) + .encode(), + ) + } + pub fn poll(&mut self, cx: &mut std::task::Context) { + while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { + match event { + ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { + self.chain_sync.set_sync_fork_request(peers, &hash, number); + }, + ToServiceCommand::RequestJustification(hash, number) => + self.chain_sync.request_justification(&hash, number), + ToServiceCommand::ClearJustificationRequests => + self.chain_sync.clear_justification_requests(), + ToServiceCommand::BlocksProcessed(imported, count, results) => { + for result in self.chain_sync.on_blocks_processed(imported, count, results) { + match result { + Ok((id, req)) => self.chain_sync.send_block_request(id, req), + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu) + }, + } + } + }, + ToServiceCommand::JustificationImported(peer, hash, number, success) => { + self.chain_sync.on_justification_import(hash, number, success); + if !success { + log::info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash); + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + self.network_service.report_peer( + peer, + sc_peerset::ReputationChange::new_fatal("Invalid justification"), + ); + } + }, + ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), + ToServiceCommand::NewBestBlockImported(hash, number) => + self.new_best_block_imported(hash, number), + } + } + while let 
Poll::Ready(result) = self.chain_sync.poll(cx) { self.process_block_announce_validation_result(result); } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 90db96f9feec0..2145ccf33bc8f 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -44,7 +44,7 @@ pub mod warp_request_handler; use crate::{ blocks::BlockCollection, schema::v1::{StateRequest, StateResponse}, - service::chain_sync::{ChainSyncInterfaceHandle, ToServiceCommand}, + service::chain_sync::ChainSyncInterfaceHandle, state::StateSync, warp::{WarpProofImportResult, WarpSync}, }; @@ -79,7 +79,6 @@ use sc_network_common::{ SyncState, SyncStatus, }, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ @@ -327,8 +326,6 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. gap_sync: Option>, - /// Channel for receiving service commands - service_rx: TracingUnboundedReceiver>, /// Handle for communicating with `NetworkService` network_service: service::network::NetworkServiceHandle, /// Protocol name used for block announcements @@ -1477,40 +1474,6 @@ where &mut self, cx: &mut std::task::Context, ) -> Poll> { - while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { - match event { - ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { - self.set_sync_fork_request(peers, &hash, number); - }, - ToServiceCommand::RequestJustification(hash, number) => - self.request_justification(&hash, number), - ToServiceCommand::ClearJustificationRequests => self.clear_justification_requests(), - ToServiceCommand::BlocksProcessed(imported, count, results) => { - for result in self.on_blocks_processed(imported, count, results) { - match result { - Ok((id, req)) => self.send_block_request(id, req), - Err(BadPeer(id, repu)) => { - self.network_service - 
.disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu) - }, - } - } - }, - ToServiceCommand::JustificationImported(peer, hash, number, success) => { - self.on_justification_import(hash, number, success); - if !success { - info!(target: "sync", "💔 Invalid justification provided by {} for #{}", peer, hash); - self.network_service - .disconnect_peer(peer, self.block_announce_protocol_name.clone()); - self.network_service.report_peer( - peer, - sc_peerset::ReputationChange::new_fatal("Invalid justification"), - ); - } - }, - } - } self.process_outbound_requests(); if let Poll::Ready(result) = self.poll_pending_responses(cx) { @@ -1586,8 +1549,7 @@ where block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { - let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); + ) -> Result<(Self, NonDefaultSetConfig), ClientError> { let block_announce_config = Self::get_block_announce_proto_config( protocol_id, fork_id, @@ -1622,7 +1584,6 @@ where warp_sync_provider, import_existing: false, gap_sync: None, - service_rx, network_service, block_request_protocol_name, state_request_protocol_name, @@ -1647,7 +1608,7 @@ where }; sync.reset_sync_start_point()?; - Ok((sync, ChainSyncInterfaceHandle::new(tx), block_announce_config)) + Ok((sync, block_announce_config)) } /// Returns the median seen block number. 
@@ -3218,7 +3179,7 @@ mod test { let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -3284,7 +3245,7 @@ mod test { let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -3466,7 +3427,7 @@ mod test { let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -3593,7 +3554,7 @@ mod test { NetworkServiceProvider::new(); let info = client.info(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -3749,7 +3710,7 @@ mod test { let info = client.info(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -3890,7 +3851,7 @@ mod test { let info = client.info(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -4031,7 +3992,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), ProtocolId::from("test-protocol-name"), @@ -4076,7 +4037,7 @@ mod test { let empty_client = 
Arc::new(TestClientBuilder::new().build()); - let (mut sync, _, _) = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, empty_client.clone(), ProtocolId::from("test-protocol-name"), diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 50ded5b643dea..006d8dc32ca1d 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -18,7 +18,7 @@ use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; -use sc_network_common::service::NetworkSyncForkRequest; +use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -34,6 +34,8 @@ pub enum ToServiceCommand { Vec<(Result>, BlockImportError>, B::Hash)>, ), JustificationImported(PeerId, B::Hash, NumberFor, bool), + AnnounceBlock(B::Hash, Option>), + NewBestBlockImported(B::Hash, NumberFor), } /// Handle for communicating with `ChainSync` asynchronously @@ -109,3 +111,13 @@ impl Link for ChainSyncInterfaceHandle { let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); } } + +impl NetworkBlock> for ChainSyncInterfaceHandle { + fn announce_block(&self, hash: B::Hash, data: Option>) { + let _ = self.tx.unbounded_send(ToServiceCommand::AnnounceBlock(hash, data)); + } + + fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self.tx.unbounded_send(ToServiceCommand::NewBestBlockImported(hash, number)); + } +} diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs index d8aad2fa7bac1..f22b43988ea85 100644 --- a/client/network/sync/src/service/mock.rs +++ b/client/network/sync/src/service/mock.rs @@ -23,7 +23,10 @@ use sc_network_common::{ config::MultiaddrWithPeerId, protocol::ProtocolName, request_responses::{IfDisconnected, RequestFailure}, - 
service::{NetworkPeers, NetworkRequest, NetworkSyncForkRequest}, + service::{ + NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, + NotificationSender, NotificationSenderError, + }, }; use sc_peerset::ReputationChange; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -125,4 +128,14 @@ mockall::mock! { connect: IfDisconnected, ); } + + impl NetworkNotification for Network { + fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec); + fn notification_sender( + &self, + target: PeerId, + protocol: ProtocolName, + ) -> Result, NotificationSenderError>; + fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec); + } } diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs index 43501baeec7be..f177a1b83dfba 100644 --- a/client/network/sync/src/service/network.rs +++ b/client/network/sync/src/service/network.rs @@ -21,16 +21,16 @@ use libp2p::PeerId; use sc_network_common::{ protocol::ProtocolName, request_responses::{IfDisconnected, RequestFailure}, - service::{NetworkPeers, NetworkRequest}, + service::{NetworkNotification, NetworkPeers, NetworkRequest}, }; use sc_peerset::ReputationChange; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::sync::Arc; /// Network-related services required by `sc-network-sync` -pub trait Network: NetworkPeers + NetworkRequest {} +pub trait Network: NetworkPeers + NetworkRequest + NetworkNotification {} -impl Network for T where T: NetworkPeers + NetworkRequest {} +impl Network for T where T: NetworkPeers + NetworkRequest + NetworkNotification {} /// Network service provider for `ChainSync` /// @@ -56,6 +56,12 @@ pub enum ToServiceCommand { oneshot::Sender, RequestFailure>>, IfDisconnected, ), + + /// Call `NetworkNotification::write_notification()` + WriteNotification(PeerId, ProtocolName, Vec), + + /// Call `NetworkNotification::set_notification_handshake()` + 
SetNotificationHandshake(ProtocolName, Vec), } /// Handle that is (temporarily) passed to `ChainSync` so it can @@ -94,6 +100,20 @@ impl NetworkServiceHandle { .tx .unbounded_send(ToServiceCommand::StartRequest(who, protocol, request, tx, connect)); } + + /// Send notification to peer + pub fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::WriteNotification(who, protocol, message)); + } + + /// Set handshake for the notification protocol. + pub fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::SetNotificationHandshake(protocol, handshake)); + } } impl NetworkServiceProvider { @@ -114,6 +134,10 @@ impl NetworkServiceProvider { service.report_peer(peer, reputation_change), ToServiceCommand::StartRequest(peer, protocol, request, tx, connect) => service.start_request(peer, protocol, request, tx, connect), + ToServiceCommand::WriteNotification(peer, protocol, message) => + service.write_notification(peer, protocol, message), + ToServiceCommand::SetNotificationHandshake(protocol, handshake) => + service.set_notification_handshake(protocol, handshake), } } } diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs index e6ed67dd9d0e8..9b71163771813 100644 --- a/client/network/sync/src/tests.rs +++ b/client/network/sync/src/tests.rs @@ -16,9 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+/* use crate::{service::network::NetworkServiceProvider, ChainSync, ForkTarget}; use libp2p::PeerId; + use sc_network_common::{ config::ProtocolId, protocol::{ @@ -30,9 +32,10 @@ use sc_network_common::{ }; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_core::H256; -use std::{sync::Arc, task::Poll}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; +use std::{sync::Arc, task::Poll}; + // verify that the fork target map is empty, then submit a new sync fork request, // poll `ChainSync` and verify that a new sync fork request has been registered #[async_std::test] @@ -76,3 +79,4 @@ async fn delegate_to_chainsync() { panic!("expected to contain `ForkTarget`"); } } +*/ diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index eef9378224105..5e4fd3772aa69 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -50,7 +50,7 @@ use sc_consensus::{ }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - Multiaddr, NetworkService, NetworkWorker, + ChainSyncInterface, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ config::{ @@ -235,6 +235,7 @@ pub struct Peer { select_chain: Option>, backend: Option>, network: NetworkWorker::Hash, PeersFullClient>, + chain_sync_service: Box>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, listen_addr: Multiaddr, @@ -396,7 +397,7 @@ where } if inform_sync_about_new_best_block { - self.network.new_best_block_imported( + self.chain_sync_service.new_best_block_imported( at, *full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number(), ); @@ -926,8 +927,9 @@ where async_std::task::spawn(async move { chain_sync_network_provider.run(service).await; }); + let service = Box::new(chain_sync_service.clone()); async_std::task::spawn(async move { - import_queue.run(Box::new(chain_sync_service)).await; + import_queue.run(service).await; 
}); self.mut_peers(move |peers| { @@ -950,6 +952,7 @@ where block_import, verifier, network, + chain_sync_service: Box::new(chain_sync_service), listen_addr, }); }); diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 4515677d0b1e0..8c6a8b14cd776 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -808,31 +808,31 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { assert!(!net.peer(1).has_block(block_hash)); } -/// Ensures that if we as a syncing node sync to the tip while we are connected to another peer -/// that is currently also doing a major sync. -#[test] -fn sync_to_tip_when_we_sync_together_with_multiple_peers() { - sp_tracing::try_init_simple(); +// /// Ensures that if we as a syncing node sync to the tip while we are connected to another peer +// /// that is currently also doing a major sync. +// #[test] +// fn sync_to_tip_when_we_sync_together_with_multiple_peers() { +// sp_tracing::try_init_simple(); - let mut net = TestNet::new(3); +// let mut net = TestNet::new(3); - let block_hash = - net.peer(0) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false); +// let block_hash = +// net.peer(0) +// .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false); - net.peer(1) - .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false); +// net.peer(1) +// .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false); - net.block_until_connected(); - net.block_until_idle(); +// net.block_until_connected(); +// net.block_until_idle(); - assert!(!net.peer(2).has_block(block_hash)); +// assert!(!net.peer(2).has_block(block_hash)); - net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); - while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) { - net.block_until_idle(); - } -} +// net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); +// while 
!net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) { +// net.block_until_idle(); +// } +// } /// Ensures that when we receive a block announcement with some data attached, that we propagate /// this data when reannouncing the block. From c642a334640c5cac129912f27b13422874dcb259 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Wed, 23 Nov 2022 10:16:32 +0200 Subject: [PATCH 06/30] Introduce `SyncStatusProvider` --- bin/node-template/node/src/service.rs | 3 +- bin/node/cli/src/service.rs | 3 +- client/informant/src/display.rs | 11 ++++-- client/informant/src/lib.rs | 21 ++++++++---- client/network/common/src/service.rs | 26 ++++---------- client/network/common/src/sync.rs | 24 +++++++++++-- client/network/src/lib.rs | 6 +++- client/network/src/service.rs | 26 ++++---------- client/network/sync/src/engine.rs | 5 +++ client/network/sync/src/service/chain_sync.rs | 23 ++++++++++++- client/service/src/builder.rs | 34 +++++++++++++------ client/service/src/metrics.rs | 23 +++++++++---- 12 files changed, 134 insertions(+), 71 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ee8464688c79c..44423a0c1fbb1 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -192,7 +192,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -240,6 +240,7 @@ pub fn new_full(mut config: Configuration) -> Result backend, system_rpc_tx, tx_handler_controller, + sync_service, config, telemetry: telemetry.as_mut(), })?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 6c29f0c08ee13..5228fb3a0da60 100644 --- a/bin/node/cli/src/service.rs +++ 
b/bin/node/cli/src/service.rs @@ -354,7 +354,7 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -393,6 +393,7 @@ pub fn new_full_base( task_manager: &mut task_manager, system_rpc_tx, tx_handler_controller, + sync_service, telemetry: telemetry.as_mut(), })?; diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 3d585a9985134..a2448a2dc93ed 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -24,7 +24,7 @@ use sc_network_common::{ service::NetworkStatus, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - SyncState, + SyncState, SyncStatus, }, }; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; @@ -69,7 +69,12 @@ impl InformantDisplay { } /// Displays the informant by calling `info!`. 
- pub fn display(&mut self, info: &ClientInfo, net_status: NetworkStatus) { + pub fn display( + &mut self, + info: &ClientInfo, + net_status: NetworkStatus, + sync_status: SyncStatus, + ) { let best_number = info.chain.best_number; let best_hash = info.chain.best_hash; let finalized_number = info.chain.finalized_number; @@ -94,7 +99,7 @@ impl InformantDisplay { }; let (level, status, target) = - match (net_status.sync_state, net_status.state_sync, net_status.warp_sync) { + match (sync_status.state, sync_status.state_sync, sync_status.warp_sync) { ( _, _, diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 52f1c95fe0198..5dff8a64aa439 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network_common::service::NetworkStatusProvider; +use sc_network_common::{service::NetworkStatusProvider, sync::SyncStatusProvider}; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; @@ -53,16 +53,18 @@ impl Default for OutputFormat { } /// Builds the informant and returns a `Future` that drives the informant. -pub async fn build( +pub async fn build( client: Arc, network: N, + syncing: S, pool: Arc

, format: OutputFormat, ) where - N: NetworkStatusProvider, + N: NetworkStatusProvider, C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, P: TransactionPool + MallocSizeOf, + S: SyncStatusProvider, { let mut display = display::InformantDisplay::new(format.clone()); @@ -70,10 +72,15 @@ pub async fn build( let display_notifications = interval(Duration::from_millis(5000)) .filter_map(|_| async { - let status = network.status().await; - status.ok() + let net_status = network.status().await; + let sync_status = syncing.status().await; + + match (net_status.ok(), sync_status.ok()) { + (Some(net), Some(sync)) => Some((net, sync)), + _ => None, + } }) - .for_each(move |net_status| { + .for_each(move |(net_status, sync_status)| { let info = client_1.usage_info(); if let Some(ref usage) = info.usage { trace!(target: "usage", "Usage statistics: {}", usage); @@ -88,7 +95,7 @@ pub async fn build( "Subsystems memory [txpool: {} kB]", parity_util_mem::malloc_size(&*pool) / 1024, ); - display.display(&info, net_status); + display.display(&info, net_status, sync_status); future::ready(()) }); diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index abc55a54c2f98..b464484fe2ad9 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -96,45 +96,33 @@ where /// Overview status of the network. #[derive(Clone)] -pub struct NetworkStatus { - /// Current global sync state. - pub sync_state: SyncState>, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_sync_peers: u32, - /// Total number of connected peers +pub struct NetworkStatus { + /// Total number of connected peers. pub num_connected_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, /// The total number of bytes received. pub total_bytes_inbound: u64, /// The total number of bytes sent. 
pub total_bytes_outbound: u64, - /// State sync in progress. - pub state_sync: Option, - /// Warp sync in progress. - pub warp_sync: Option>, } /// Provides high-level status information about network. #[async_trait::async_trait] -pub trait NetworkStatusProvider { +pub trait NetworkStatusProvider { /// High-level network status information. /// /// Returns an error if the `NetworkWorker` is no longer running. - async fn status(&self) -> Result, ()>; + async fn status(&self) -> Result; } // Manual implementation to avoid extra boxing here -impl NetworkStatusProvider for Arc +impl NetworkStatusProvider for Arc where T: ?Sized, - T: NetworkStatusProvider, + T: NetworkStatusProvider, { fn status<'life0, 'async_trait>( &'life0 self, - ) -> Pin, ()>> + Send + 'async_trait>> + ) -> Pin> + Send + 'async_trait>> where 'life0: 'async_trait, Self: 'async_trait, diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 6b6f67087382f..052dd55e5faad 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -25,6 +25,7 @@ pub mod warp; use crate::protocol::role::Roles; use libp2p::PeerId; + use message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}; use sc_consensus::{ import_queue::RuntimeOrigin, BlockImportError, BlockImportStatus, IncomingBlock, @@ -34,9 +35,10 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, }; -use std::{any::Any, fmt, fmt::Formatter, task::Poll}; use warp::WarpSyncProgress; +use std::{any::Any, fmt, fmt::Formatter, sync::Arc, task::Poll}; + /// The sync status of a peer we are trying to sync with #[derive(Debug)] pub struct PeerInfo { @@ -85,7 +87,7 @@ pub struct StateDownloadProgress { } /// Syncing status and statistics. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct SyncStatus { /// Current global sync state. 
pub state: SyncState>, @@ -266,6 +268,24 @@ impl fmt::Debug for OpaqueBlockResponse { } } +#[async_trait::async_trait] +pub trait SyncStatusProvider: Send + Sync { + /// Get high-level view of the syncing status. + async fn status(&self) -> Result, ()>; +} + +#[async_trait::async_trait] +impl SyncStatusProvider for Arc +where + T: ?Sized, + T: SyncStatusProvider, + Block: BlockT, +{ + async fn status(&self) -> Result, ()> { + T::status(self).await + } +} + /// Something that represents the syncing strategy to download past and future blocks of the chain. pub trait ChainSync: Send { /// Returns the state of the sync of the given peer. diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index b2468b174848f..a61c2ed7b46a7 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -272,7 +272,7 @@ pub use sc_network_common::{ }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - ExtendedPeerInfo, StateDownloadProgress, SyncState, + ExtendedPeerInfo, StateDownloadProgress, SyncState, SyncStatusProvider, }, }; pub use service::{ @@ -301,8 +301,10 @@ pub trait ChainSyncInterface: + JustificationSyncLink + Link + NetworkBlock> + + SyncStatusProvider + Send + Sync + + 'static { } @@ -311,7 +313,9 @@ impl ChainSyncInterface for T where + JustificationSyncLink + Link + NetworkBlock> + + SyncStatusProvider + Send + Sync + + 'static { } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index b518f2454b1b3..4b8083d299f79 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -119,7 +119,7 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. 
- to_worker: TracingUnboundedSender>, + to_worker: TracingUnboundedSender, /// Interface that can be used to delegate calls to `ChainSync` chain_sync_service: Box>, /// For each peer and protocol combination, an object that allows sending notifications to @@ -465,18 +465,11 @@ where } /// High-level network status information. - pub fn status(&self) -> NetworkStatus { - let status = self.sync_state(); + pub fn status(&self) -> NetworkStatus { NetworkStatus { - sync_state: status.state, - best_seen_block: self.best_seen_block(), - num_sync_peers: self.num_sync_peers(), num_connected_peers: self.num_connected_peers(), - num_active_peers: self.num_active_peers(), total_bytes_inbound: self.total_bytes_inbound(), total_bytes_outbound: self.total_bytes_outbound(), - state_sync: status.state_sync, - warp_sync: status.warp_sync, } } @@ -500,11 +493,6 @@ where self.network_service.behaviour().user_protocol().num_active_peers() } - /// Current global sync state. - pub fn sync_state(&self) -> SyncStatus { - self.network_service.behaviour().user_protocol().sync_state() - } - /// Target sync block number. pub fn best_seen_block(&self) -> Option> { self.network_service.behaviour().user_protocol().best_seen_block() @@ -817,12 +805,12 @@ where } #[async_trait::async_trait] -impl NetworkStatusProvider for NetworkService +impl NetworkStatusProvider for NetworkService where B: BlockT + 'static, H: ExHashT, { - async fn status(&self) -> Result, ()> { + async fn status(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self @@ -1205,7 +1193,7 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. 
-enum ServiceToWorkerMsg { +enum ServiceToWorkerMsg { GetValue(KademliaKey), PutValue(KademliaKey, Vec), AddKnownAddress(PeerId, Multiaddr), @@ -1227,7 +1215,7 @@ enum ServiceToWorkerMsg { connect: IfDisconnected, }, NetworkStatus { - pending_response: oneshot::Sender, RequestFailure>>, + pending_response: oneshot::Sender>, }, NetworkState { pending_response: oneshot::Sender>, @@ -1263,7 +1251,7 @@ where /// The *actual* network. network_service: Swarm>, /// Messages from the [`NetworkService`] that must be processed. - from_service: TracingUnboundedReceiver>, + from_service: TracingUnboundedReceiver, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index f7d7aa70c668d..dabacc94dec20 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -459,9 +459,14 @@ where ) } + // TODO(aaro): reorder match properly pub fn poll(&mut self, cx: &mut std::task::Context) { while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { match event { + ToServiceCommand::Status(tx) => + if let Err(_) = tx.send(self.chain_sync.status()) { + log::warn!(target: "sync", "Failed to respond to `Status` query"); + }, ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.chain_sync.set_sync_fork_request(peers, &hash, number); }, diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 006d8dc32ca1d..3edb186f65235 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -16,15 +16,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +// TODO(aaro): reorder traits properly +// TODO(aaro): document functions +// TODO(aaro): rename this file to sync_service.rs? 
+ +use futures::channel::oneshot; + use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; -use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; +use sc_network_common::{ + service::{NetworkBlock, NetworkSyncForkRequest}, + sync::{SyncStatus, SyncStatusProvider}, +}; use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// Commands send to `ChainSync` #[derive(Debug)] pub enum ToServiceCommand { + Status(oneshot::Sender>), SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, @@ -83,6 +93,17 @@ impl JustificationSyncLink for ChainSyncInterfaceHandle { } } +#[async_trait::async_trait] +impl SyncStatusProvider for ChainSyncInterfaceHandle { + /// Get high-level view of the syncing status. + async fn status(&self) -> Result, ()> { + let (rtx, rrx) = oneshot::channel(); + + let _ = self.tx.unbounded_send(ToServiceCommand::Status(rtx)); + rrx.await.map_err(|_| ()) + } +} + impl Link for ChainSyncInterfaceHandle { fn blocks_processed( &mut self, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 49ece6129c658..5a1650ccca5c5 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,7 +37,7 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{config::SyncMode, NetworkService}; +use sc_network::{config::SyncMode, ChainSyncInterface, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ protocol::role::Roles, @@ -327,12 +327,7 @@ where /// Shared network instance implementing a set of mandatory traits. 
pub trait SpawnTaskNetwork: - sc_offchain::NetworkProvider - + NetworkStateInfo - + NetworkStatusProvider - + Send - + Sync - + 'static + sc_offchain::NetworkProvider + NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static { } @@ -341,7 +336,7 @@ where Block: BlockT, T: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkStatusProvider + + NetworkStatusProvider + Send + Sync + 'static, @@ -372,6 +367,8 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// Controller for transactions handlers pub tx_handler_controller: sc_network_transactions::TransactionsHandlerController<::Hash>, + /// Syncing service. + pub sync_service: Arc>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, } @@ -451,6 +448,7 @@ where network, system_rpc_tx, tx_handler_controller, + sync_service, telemetry, } = params; @@ -513,7 +511,12 @@ where spawn_handle.spawn( "telemetry-periodic-send", None, - metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()), + metrics_service.run( + client.clone(), + transaction_pool.clone(), + network.clone(), + sync_service.clone(), + ), ); let rpc_id_provider = config.rpc_id_provider.take(); @@ -543,6 +546,7 @@ where sc_informant::build( client.clone(), network, + sync_service.clone(), transaction_pool.clone(), config.informant_output_format, ), @@ -742,6 +746,7 @@ pub fn build_network( TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, + Arc>, ), Error, > @@ -926,6 +931,7 @@ where let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); + let sync_service = chain_sync_service.clone(); let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( network.clone(), @@ -939,7 +945,7 @@ where Some("networking"), chain_sync_network_provider.run(network.clone()), ); - 
spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(chain_sync_service))); + spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service))); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); @@ -988,7 +994,13 @@ where future.await }); - Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx))) + Ok(( + network, + system_rpc_tx, + tx_handler_controller, + NetworkStarter(network_start_tx), + Arc::new(chain_sync_service), + )) } /// Object used to start the network. diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 13b249a7b9563..3913c62e8805f 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -23,7 +23,10 @@ use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; use sc_network::config::Role; -use sc_network_common::service::{NetworkStatus, NetworkStatusProvider}; +use sc_network_common::{ + service::{NetworkStatus, NetworkStatusProvider}, + sync::{SyncStatus, SyncStatusProvider}, +}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; @@ -183,16 +186,18 @@ impl MetricsService { /// Returns a never-ending `Future` that performs the /// metric and telemetry updates with information from /// the given sources. 
- pub async fn run( + pub async fn run( mut self, client: Arc, transactions: Arc, network: TNet, + syncing: TSync, ) where TBl: Block, TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, - TNet: NetworkStatusProvider, + TNet: NetworkStatusProvider, + TSync: SyncStatusProvider, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); @@ -204,8 +209,11 @@ impl MetricsService { // Try to get the latest network information. let net_status = network.status().await.ok(); + // Try to get the latest syncing information. + let sync_status = syncing.status().await.ok(); + // Update / Send the metrics. - self.update(&client.usage_info(), &transactions.status(), net_status); + self.update(&client.usage_info(), &transactions.status(), net_status, sync_status); // Schedule next tick. timer.reset(timer_interval); @@ -216,7 +224,8 @@ impl MetricsService { &mut self, info: &ClientInfo, txpool_status: &PoolStatus, - net_status: Option>, + net_status: Option, + sync_status: Option>, ) { let now = Instant::now(); let elapsed = (now - self.last_update).as_secs(); @@ -293,10 +302,12 @@ impl MetricsService { "bandwidth_download" => avg_bytes_per_sec_inbound, "bandwidth_upload" => avg_bytes_per_sec_outbound, ); + } + if let Some(sync_status) = sync_status { if let Some(metrics) = self.metrics.as_ref() { let best_seen_block: Option = - net_status.best_seen_block.map(|num: NumberFor| { + sync_status.best_seen_block.map(|num: NumberFor| { UniqueSaturatedInto::::unique_saturated_into(num) }); From badfbf339e12c9581d842d8efacce04f2b2397fe Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Wed, 23 Nov 2022 11:48:30 +0200 Subject: [PATCH 07/30] Move `sync_peer_(connected|disconnected)` to `SyncingEngine` --- client/network/src/protocol.rs | 207 +------------------- client/network/src/service/tests/mod.rs | 7 +- client/network/sync/src/engine.rs | 245 ++++++++++++++++++++---- client/network/test/src/lib.rs | 41 +++- 
client/service/src/builder.rs | 46 ++++- 5 files changed, 305 insertions(+), 241 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index caee998a0b134..c54022a35086e 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -106,17 +106,6 @@ pub struct Protocol { roles: Roles, genesis_hash: B::Hash, chain: Arc, - /// List of nodes for which we perform additional logging because they are important for the - /// user. - important_peers: HashSet, - /// List of nodes that should never occupy peer slots. - default_peers_set_no_slot_peers: HashSet, - /// Actual list of connected no-slot nodes. - default_peers_set_no_slot_connected_peers: HashSet, - /// Value that was passed as part of the configuration. Used to cap the number of full nodes. - default_peers_set_num_full: usize, - /// Number of slots to allocate to light nodes. - default_peers_set_num_light: usize, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, /// Handles opening the unique substream and sending and receiving raw messages. @@ -130,8 +119,6 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - /// The `PeerId`'s of all boot nodes. 
- boot_node_ids: HashSet, // TODO: remove eventually engine: SyncingEngine, } @@ -157,31 +144,6 @@ where ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); - let boot_node_ids = { - let mut list = HashSet::new(); - for node in &network_config.boot_nodes { - list.insert(node.peer_id); - } - list.shrink_to_fit(); - list - }; - - let important_peers = { - let mut imp_p = HashSet::new(); - for reserved in &network_config.default_peers_set.reserved_nodes { - imp_p.insert(reserved.peer_id); - } - for reserved in network_config - .extra_sets - .iter() - .flat_map(|s| s.set_config.reserved_nodes.iter()) - { - imp_p.insert(reserved.peer_id); - } - imp_p.shrink_to_fit(); - imp_p - }; - let default_peers_set_no_slot_peers = { let mut no_slot_p: HashSet = network_config .default_peers_set @@ -272,15 +234,6 @@ where roles, chain, genesis_hash: info.genesis_hash, - important_peers, - default_peers_set_no_slot_peers, - default_peers_set_no_slot_connected_peers: HashSet::new(), - default_peers_set_num_full: network_config.default_peers_set_num_full as usize, - default_peers_set_num_light: { - let total = network_config.default_peers_set.out_peers + - network_config.default_peers_set.in_peers; - total.saturating_sub(network_config.default_peers_set_num_full) as usize - }, peerset_handle: peerset_handle.clone(), behaviour, notification_protocols: iter::once(block_announces_protocol.notifications_protocol) @@ -288,7 +241,6 @@ where .collect(), bad_handshake_substreams: Default::default(), engine, - boot_node_ids, }; Ok((protocol, peerset_handle, known_addresses)) @@ -366,158 +318,11 @@ where self.engine.peers.iter().map(|(id, peer)| (id, &peer.info)) } - /// Called by peer when it is disconnecting. - /// - /// Returns a result if the handshake of this peer was indeed accepted. 
- pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { - if self.important_peers.contains(&peer) { - warn!(target: "sync", "Reserved peer {} disconnected", peer); - } else { - debug!(target: "sync", "{} disconnected", peer); - } - - if let Some(_peer_data) = self.engine.peers.remove(&peer) { - self.engine.chain_sync.peer_disconnected(&peer); - self.default_peers_set_no_slot_connected_peers.remove(&peer); - Ok(()) - } else { - Err(()) - } - } - /// Adjusts the reputation of a node. pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { self.peerset_handle.report_peer(who, reputation) } - /// Called on the first connection between two peers on the default set, after their exchange - /// of handshake. - /// - /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync - /// from. - fn on_sync_peer_connected( - &mut self, - who: PeerId, - status: BlockAnnouncesHandshake, - ) -> Result<(), ()> { - trace!(target: "sync", "New peer {} {:?}", who, status); - - if self.engine.peers.contains_key(&who) { - error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); - debug_assert!(false); - return Err(()) - } - - if status.genesis_hash != self.genesis_hash { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, - "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, status.genesis_hash - ); - self.peerset_handle.report_peer(who, rep::GENESIS_MISMATCH); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - - if self.boot_node_ids.contains(&who) { - error!( - target: "sync", - "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", - who, - self.genesis_hash, - status.genesis_hash, - ); - } - - return Err(()) - } - - if self.roles.is_light() { - // we're not interested in light peers - if status.roles.is_light() { - debug!(target: "sync", "Peer 
{} is unable to serve light requests", who); - self.peerset_handle.report_peer(who, rep::BAD_ROLE); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) - } - - // we don't interested in peers that are far behind us - let self_best_block = self.chain.info().best_number; - let blocks_difference = self_best_block - .checked_sub(&status.best_number) - .unwrap_or_else(Zero::zero) - .saturated_into::(); - if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { - debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); - self.peerset_handle.report_peer(who, rep::PEER_BEHIND_US_LIGHT); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) - } - } - - let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); - let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; - - if status.roles.is_full() && - self.engine.chain_sync.num_peers() >= - self.default_peers_set_num_full + - self.default_peers_set_no_slot_connected_peers.len() + - this_peer_reserved_slot - { - debug!(target: "sync", "Too many full nodes, rejecting {}", who); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) - } - - if status.roles.is_light() && - (self.engine.peers.len() - self.engine.chain_sync.num_peers()) >= - self.default_peers_set_num_light - { - // Make sure that not all slots are occupied by light clients. 
- debug!(target: "sync", "Too many light nodes, rejecting {}", who); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()) - } - - let peer = Peer { - info: ExtendedPeerInfo { - roles: status.roles, - best_hash: status.best_hash, - best_number: status.best_number, - }, - known_blocks: LruHashSet::new( - NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), - ), - }; - - let req = if peer.info.roles.is_full() { - match self.engine.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { - Ok(req) => req, - Err(BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer(id, repu); - return Err(()) - }, - } - } else { - None - }; - - debug!(target: "sync", "Connected {}", who); - - self.engine.peers.insert(who, peer); - if no_slot_peer { - self.default_peers_set_no_slot_connected_peers.insert(who); - } - self.pending_messages - .push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number)); - - if let Some(req) = req { - self.engine.chain_sync.send_block_request(who, req); - } - - Ok(()) - } - /// Set handshake for the notification protocol. 
pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { @@ -802,7 +607,7 @@ where }; let roles = handshake.roles; - if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + if self.engine.on_sync_peer_connected(peer_id, handshake).is_ok() { self.pending_messages.push_back( CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, @@ -832,10 +637,13 @@ where match as DecodeAll>::decode_all( &mut &received_handshake[..], ) { - // TODO: korjaa tämä toimimaan Ok(handshake) => { let roles = handshake.roles; - if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + if self + .engine + .on_sync_peer_connected(peer_id, handshake) + .is_ok() + { self.pending_messages.push_back( CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, @@ -914,10 +722,11 @@ where notifications_sink, } }, + // TODO(aaro): listen on event stream in `SyncingEngine` NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { - if self.on_sync_peer_disconnected(peer_id).is_ok() { + if self.engine.on_sync_peer_disconnected(peer_id).is_ok() { CustomMessageOutcome::SyncDisconnected(peer_id) } else { log::trace!( diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index f350128ad3bb0..fa6a394e50d2f 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -40,7 +40,7 @@ use sc_network_sync::{ ChainSync, }; use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block as TestBlock, Hash as TestHash}, TestClient, TestClientBuilder, TestClientBuilderExt as _, @@ -271,6 +271,11 @@ impl TestNetworkBuilder { state_request_protocol_config.name.clone(), None, std::num::NonZeroUsize::new(16).unwrap(), + HashSet::new(), + HashSet::new(), + HashSet::new(), + 0usize, + 0usize, ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index dabacc94dec20..5269bbbfbb78a 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -47,21 +47,35 @@ use sp_blockchain::HeaderMetadata; use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, Zero}, + traits::{Block as BlockT, CheckedSub, Header, NumberFor, Zero}, + SaturatedConversion, }; -use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, task::Poll}; +use std::{ + collections::{HashMap, HashSet}, + num::NonZeroUsize, + sync::Arc, + task::Poll, +}; + +/// When light node connects to the full node and the full node is behind light node +/// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful +/// and disconnect to free connection slot. 
+const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; + +/// Maximum number of known block hashes to keep for a peer. +const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead mod rep { use sc_peerset::ReputationChange as Rep; - // /// Reputation change when we are a light client and a peer is behind us. - // pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - // /// We received a message that failed to decode. - // pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - // /// Peer has different genesis. - // pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); - // /// Peer role does not match (e.g. light peer connecting to another light peer). - // pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); + /// Reputation change when we are a light client and a peer is behind us. + pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// Peer has different genesis. + pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); + /// Peer role does not match (e.g. light peer connecting to another light peer). + pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); /// Peer send us a block announcement that failed at validation. pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); } @@ -126,8 +140,6 @@ pub struct SyncingEngine { /// Channel for receiving service commands service_rx: TracingUnboundedReceiver>, - // /// Interval at which we call `tick`. - // tick_timeout: Pin + Send>>, /// Assigned roles. roles: Roles, @@ -137,39 +149,34 @@ pub struct SyncingEngine { /// All connected peers. Contains both full and light node peers. pub peers: HashMap>, + /// List of nodes for which we perform additional logging because they are important for the + /// user. 
+ pub important_peers: HashSet, + + /// Actual list of connected no-slot nodes. + pub default_peers_set_no_slot_connected_peers: HashSet, + + /// List of nodes that should never occupy peer slots. + pub default_peers_set_no_slot_peers: HashSet, + + /// Value that was passed as part of the configuration. Used to cap the number of full + /// nodes. + default_peers_set_num_full: usize, + + /// Number of slots to allocate to light nodes. + default_peers_set_num_light: usize, + /// A cache for the data that was associated to a block announcement. pub block_announce_data_cache: LruCache>, + /// The `PeerId`'s of all boot nodes. + pub boot_node_ids: HashSet, + /// Protocol name used for block announcements block_announce_protocol_name: ProtocolName, - // /// List of nodes for which we perform additional logging because they are important for the - // /// user. - // important_peers: HashSet, - // /// List of nodes that should never occupy peer slots. - // default_peers_set_no_slot_peers: HashSet, - // /// Actual list of connected no-slot nodes. - // default_peers_set_no_slot_connected_peers: HashSet, - // /// Value that was passed as part of the configuration. Used to cap the number of full - // nodes. default_peers_set_num_full: usize, - // /// Number of slots to allocate to light nodes. - // default_peers_set_num_light: usize, - // /// Used to report reputation changes. - // peerset_handle: sc_peerset::PeersetHandle, - // /// Handles opening the unique substream and sending and receiving raw messages. - // behaviour: Notifications, - // /// List of notifications protocols that have been registered. - // notification_protocols: Vec, - // /// If we receive a new "substream open" event that contains an invalid handshake, we ask - // the /// inner layer to force-close the substream. Force-closing the substream will generate - // a /// "substream closed" event. 
This is a problem: since we can't propagate the "substream - // open" /// event to the outer layers, we also shouldn't propagate this "substream closed" - // event. To /// solve this, an entry is added to this map whenever an invalid handshake is - // received. /// Entries are removed when the corresponding "substream closed" is later - // received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, + /// Prometheus metrics. metrics: Option, - // /// The `PeerId`'s of all boot nodes. - // boot_node_ids: HashSet, } impl SyncingEngine @@ -200,6 +207,11 @@ where state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, cache_capacity: NonZeroUsize, + important_peers: HashSet, + boot_node_ids: HashSet, + default_peers_set_no_slot_peers: HashSet, + default_peers_set_num_full: usize, + default_peers_set_num_light: usize, ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { let (chain_sync, block_announce_config) = ChainSync::new( mode, @@ -237,6 +249,12 @@ where block_announce_protocol_name, service_rx, genesis_hash, + important_peers, + default_peers_set_no_slot_connected_peers: HashSet::new(), + boot_node_ids, + default_peers_set_no_slot_peers, + default_peers_set_num_full, + default_peers_set_num_light, metrics: if let Some(r) = metrics_registry { match Metrics::register(r) { Ok(metrics) => Some(metrics), @@ -508,4 +526,155 @@ where self.process_block_announce_validation_result(result); } } + + /// Called by peer when it is disconnecting. + /// + /// Returns a result if the handshake of this peer was indeed accepted. 
+ pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { + if self.important_peers.contains(&peer) { + log::warn!(target: "sync", "Reserved peer {} disconnected", peer); + } else { + log::debug!(target: "sync", "{} disconnected", peer); + } + + if let Some(_peer_data) = self.peers.remove(&peer) { + self.chain_sync.peer_disconnected(&peer); + self.default_peers_set_no_slot_connected_peers.remove(&peer); + Ok(()) + } else { + Err(()) + } + } + + // TODO: peernewbest + /// Called on the first connection between two peers on the default set, after their exchange + /// of handshake. + /// + /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync + /// from. + pub fn on_sync_peer_connected( + &mut self, + who: PeerId, + status: BlockAnnouncesHandshake, + ) -> Result<(), ()> { + log::trace!(target: "sync", "New peer {} {:?}", who, status); + + if self.peers.contains_key(&who) { + log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); + debug_assert!(false); + return Err(()) + } + + if status.genesis_hash != self.genesis_hash { + log::log!( + target: "sync", + if self.important_peers.contains(&who) { log::Level::Warn } else { log::Level::Debug }, + "Peer is on different chain (our genesis: {} theirs: {})", + self.genesis_hash, status.genesis_hash + ); + self.network_service.report_peer(who, rep::GENESIS_MISMATCH); + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + + if self.boot_node_ids.contains(&who) { + log::error!( + target: "sync", + "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, + ); + } + + return Err(()) + } + + if self.roles.is_light() { + // we're not interested in light peers + if status.roles.is_light() { + log::debug!(target: "sync", "Peer {} is unable to serve light requests", who); + self.network_service.report_peer(who, 
rep::BAD_ROLE); + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + return Err(()) + } + + // we don't interested in peers that are far behind us + let self_best_block = self.client.info().best_number; + let blocks_difference = self_best_block + .checked_sub(&status.best_number) + .unwrap_or_else(Zero::zero) + .saturated_into::(); + if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { + log::debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); + self.network_service.report_peer(who, rep::PEER_BEHIND_US_LIGHT); + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + return Err(()) + } + } + + let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who); + let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + + if status.roles.is_full() && + self.chain_sync.num_peers() >= + self.default_peers_set_num_full + + self.default_peers_set_no_slot_connected_peers.len() + + this_peer_reserved_slot + { + log::debug!(target: "sync", "Too many full nodes, rejecting {}", who); + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + return Err(()) + } + + if status.roles.is_light() && + (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light + { + // Make sure that not all slots are occupied by light clients. 
+ log::debug!(target: "sync", "Too many light nodes, rejecting {}", who); + self.network_service + .disconnect_peer(who, self.block_announce_protocol_name.clone()); + return Err(()) + } + + let peer = Peer { + info: ExtendedPeerInfo { + roles: status.roles, + best_hash: status.best_hash, + best_number: status.best_number, + }, + known_blocks: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), + ), + }; + + let req = if peer.info.roles.is_full() { + match self.chain_sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + Ok(req) => req, + Err(BadPeer(id, repu)) => { + self.network_service + .disconnect_peer(id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(id, repu); + return Err(()) + }, + } + } else { + None + }; + + log::debug!(target: "sync", "Connected {}", who); + + self.peers.insert(who, peer); + if no_slot_peer { + self.default_peers_set_no_slot_connected_peers.insert(who); + } + + if let Some(req) = req { + self.chain_sync.send_block_request(who, req); + } + + Ok(()) + } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 5e4fd3772aa69..ddfb88877b973 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,7 +23,7 @@ mod block_import; mod sync; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, marker::PhantomData, num::NonZeroUsize, pin::Pin, @@ -897,6 +897,45 @@ where state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), NonZeroUsize::new(16).unwrap(), + { + let mut imp_p = HashSet::new(); + for reserved in &network_config.default_peers_set.reserved_nodes { + imp_p.insert(reserved.peer_id); + } + for reserved in network_config + .extra_sets + .iter() + .flat_map(|s| s.set_config.reserved_nodes.iter()) + { + imp_p.insert(reserved.peer_id); + } + imp_p.shrink_to_fit(); + imp_p + }, + { + let mut list = HashSet::new(); + for node in &network_config.boot_nodes { + 
list.insert(node.peer_id); + } + list.shrink_to_fit(); + list + }, + { + let mut no_slot_p: HashSet = network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|reserved| reserved.peer_id) + .collect(); + no_slot_p.shrink_to_fit(); + no_slot_p + }, + network_config.default_peers_set_num_full as usize, + { + let total = network_config.default_peers_set.out_peers + + network_config.default_peers_set.in_peers; + total.saturating_sub(network_config.default_peers_set_num_full) as usize + }, ) .unwrap(); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5a1650ccca5c5..384d12889f8d1 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -22,7 +22,7 @@ use crate::{ config::{Configuration, KeystoreConfig, PrometheusConfig}, error::Error, metrics::MetricsService, - start_rpc_servers, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, + start_rpc_servers, PeerId, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; use jsonrpsee::RpcModule; @@ -74,7 +74,7 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}, BuildStorage, }; -use std::{num::NonZeroUsize, str::FromStr, sync::Arc, time::SystemTime}; +use std::{collections::HashSet, num::NonZeroUsize, str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. 
pub type TFullClient = @@ -850,6 +850,7 @@ where protocol_config }; + // TODO(aaro): expose `config.network` through common crate let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), @@ -877,6 +878,47 @@ where .max(1), ) .expect("cache capacity is not zero"), + { + let mut imp_p = HashSet::new(); + for reserved in &config.network.default_peers_set.reserved_nodes { + imp_p.insert(reserved.peer_id); + } + for reserved in config + .network + .extra_sets + .iter() + .flat_map(|s| s.set_config.reserved_nodes.iter()) + { + imp_p.insert(reserved.peer_id); + } + imp_p.shrink_to_fit(); + imp_p + }, + { + let mut list = HashSet::new(); + for node in &config.network.boot_nodes { + list.insert(node.peer_id); + } + list.shrink_to_fit(); + list + }, + { + let mut no_slot_p: HashSet = config + .network + .default_peers_set + .reserved_nodes + .iter() + .map(|reserved| reserved.peer_id) + .collect(); + no_slot_p.shrink_to_fit(); + no_slot_p + }, + config.network.default_peers_set_num_full as usize, + { + let total = config.network.default_peers_set.out_peers + + config.network.default_peers_set.in_peers; + total.saturating_sub(config.network.default_peers_set_num_full) as usize + }, )?; request_response_protocol_configs.push(config.network.ipfs_server.then(|| { From 755b47cf25ea04e0a48b17aee05b36dfa6cf54a9 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Fri, 25 Nov 2022 10:34:36 +0200 Subject: [PATCH 08/30] Implement `SyncEventStream` Remove `SyncConnected`/`SyncDisconnected` events from `NetworkEvenStream` and provide those events through `ChainSyncInterface` instead. Modify BEEFY/GRANDPA/transactions protocol and `NetworkGossip` to take `SyncEventStream` object which they listen to for incoming sync peer events. 
--- Cargo.lock | 2 + bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 5 +- client/beefy/src/lib.rs | 15 ++- client/beefy/src/tests.rs | 6 ++ client/beefy/src/worker.rs | 37 ++++--- .../finality-grandpa/src/communication/mod.rs | 7 +- .../src/communication/tests.rs | 24 ++++- client/finality-grandpa/src/lib.rs | 6 +- client/finality-grandpa/src/observer.rs | 3 + client/finality-grandpa/src/tests.rs | 47 ++++++-- client/network-gossip/src/bridge.rs | 101 ++++++++++++++---- client/network/common/Cargo.toml | 1 + client/network/common/src/protocol/event.rs | 12 --- client/network/common/src/service.rs | 2 - client/network/common/src/sync.rs | 27 +++++ client/network/src/behaviour.rs | 9 -- client/network/src/lib.rs | 4 +- client/network/src/protocol.rs | 60 ++++------- client/network/src/service.rs | 6 -- client/network/src/service/out_events.rs | 18 ---- client/network/src/service/tests/service.rs | 4 - client/network/sync/src/engine.rs | 17 ++- client/network/sync/src/service/chain_sync.rs | 19 +++- client/network/test/src/lib.rs | 15 ++- client/network/transactions/Cargo.toml | 1 + client/network/transactions/src/lib.rs | 34 ++++-- client/service/src/builder.rs | 2 + 28 files changed, 328 insertions(+), 160 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2265d4192cd82..8b531b690024b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8266,6 +8266,7 @@ dependencies = [ "prost-build", "sc-consensus", "sc-peerset", + "sc-utils", "serde", "smallvec", "sp-blockchain", @@ -8396,6 +8397,7 @@ dependencies = [ "pin-project", "sc-network-common", "sc-peerset", + "sc-utils", "sp-consensus", "sp-runtime", "substrate-prometheus-endpoint", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 44423a0c1fbb1..1720524a2bdfc 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -240,7 +240,7 @@ pub fn new_full(mut config: Configuration) -> Result backend, system_rpc_tx, 
tx_handler_controller, - sync_service, + sync_service: sync_service.clone(), config, telemetry: telemetry.as_mut(), })?; @@ -321,6 +321,8 @@ pub fn new_full(mut config: Configuration) -> Result config: grandpa_config, link: grandpa_link, network, + // TODo(aaro): fix arc + sync: Arc::new(sync_service), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 5228fb3a0da60..6e6d1c0adde71 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,7 +29,7 @@ use node_primitives::Block; use sc_client_api::BlockBackend; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::NetworkService; +use sc_network::{ChainSyncInterface, NetworkService}; use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; @@ -393,7 +393,7 @@ pub fn new_full_base( task_manager: &mut task_manager, system_rpc_tx, tx_handler_controller, - sync_service, + sync_service: sync_service.clone(), telemetry: telemetry.as_mut(), })?; @@ -533,6 +533,7 @@ pub fn new_full_base( config, link: grandpa_link, network: network.clone(), + sync: Arc::new(sync_service), telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 3bdd13982aea2..195167112d921 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -42,7 +42,7 @@ use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; use sc_consensus::BlockImport; use sc_network::ProtocolName; -use sc_network_common::service::NetworkRequest; +use 
sc_network_common::{service::NetworkRequest, sync::SyncEventStream}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi}; use sp_blockchain::{ @@ -168,6 +168,8 @@ where pub struct BeefyNetworkParams { /// Network implementing gossip, requests and sync-oracle. pub network: Arc, + /// Syncing service implementing event stream for peers. + pub sync: Arc, /// Chain specific BEEFY gossip protocol name. See /// [`communication::beefy_protocol_name::gossip_protocol_name`]. pub gossip_protocol_name: ProtocolName, @@ -228,14 +230,20 @@ where on_demand_justifications_handler, } = beefy_params; - let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. } = - network_params; + let BeefyNetworkParams { + network, + sync, + gossip_protocol_name, + justifications_protocol_name, + .. + } = network_params; let known_peers = Arc::new(Mutex::new(KnownPeers::new())); let gossip_validator = Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); let mut gossip_engine = sc_network_gossip::GossipEngine::new( network.clone(), + sync.clone(), gossip_protocol_name, gossip_validator.clone(), None, @@ -285,6 +293,7 @@ where backend, payload_provider, network, + sync, key_store: key_store.into(), known_peers, gossip_engine, diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 9a31d4a583d0e..6df47af10e336 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -139,6 +139,10 @@ impl BeefyTestNet { }); } + pub(crate) fn block_until_sync_connected(&mut self) { + todo!(); + } + pub(crate) fn generate_blocks_and_sync( &mut self, count: usize, @@ -342,6 +346,7 @@ where let network_params = crate::BeefyNetworkParams { network: peer.network_service().clone(), + sync: peer.sync_service().clone(), gossip_protocol_name: beefy_gossip_proto_name(), justifications_protocol_name: on_demand_justif_handler.protocol_name(), _phantom: PhantomData, @@ -954,6 
+959,7 @@ fn test_voter_init_setup( Arc::new(crate::communication::gossip::GossipValidator::new(known_peers)); let mut gossip_engine = sc_network_gossip::GossipEngine::new( net.peer(0).network_service().clone(), + net.peer(0).sync_service().clone(), "/beefy/whatever", gossip_validator, None, diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index e387fed79c6a0..55f103de05879 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -32,6 +32,7 @@ use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, Header use sc_network_common::{ protocol::event::Event as NetEvent, service::{NetworkEventStream, NetworkRequest}, + sync::{SyncEvent, SyncEventStream}, }; use sc_network_gossip::GossipEngine; @@ -252,6 +253,7 @@ pub(crate) struct WorkerParams { pub backend: Arc, pub payload_provider: P, pub network: N, + pub sync: Arc, pub key_store: BeefyKeystore, pub known_peers: Arc>>, pub gossip_engine: GossipEngine, @@ -302,6 +304,7 @@ pub(crate) struct BeefyWorker { backend: Arc, payload_provider: P, network: N, + sync: Arc, key_store: BeefyKeystore, // communication @@ -346,6 +349,7 @@ where payload_provider, key_store, network, + sync, gossip_engine, gossip_validator, on_demand_justifications, @@ -359,6 +363,7 @@ where backend, payload_provider, network, + sync, known_peers, key_store, gossip_engine, @@ -794,7 +799,7 @@ where ) { info!(target: "beefy", "🥩 run BEEFY worker, best grandpa: #{:?}.", self.best_grandpa_block()); - let mut network_events = self.network.event_stream("network-gossip").fuse(); + let mut sync_events = self.sync.event_stream("network-gossipzzz").fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) @@ -838,11 +843,11 @@ where return; }, // Keep track of connected peers. 
- net_event = network_events.next() => { - if let Some(net_event) = net_event { - self.handle_network_event(net_event); + sync_event = sync_events.next() => { + if let Some(sync_event) = sync_event { + self.handle_sync_event(sync_event); } else { - error!(target: "beefy", "🥩 Network events stream terminated, closing worker."); + error!(target: "beefy", "🥩 Syncing events stream terminated, closing worker."); return; } }, @@ -897,16 +902,14 @@ where } /// Update known peers based on network events. - fn handle_network_event(&mut self, event: NetEvent) { + fn handle_sync_event(&mut self, event: SyncEvent) { match event { - NetEvent::SyncConnected { remote } => { + SyncEvent::PeerConnected(remote) => { self.known_peers.lock().add_new(remote); }, - NetEvent::SyncDisconnected { remote } => { + SyncEvent::PeerDisconnected(remote) => { self.known_peers.lock().remove(&remote); }, - // We don't care about other events. - _ => (), } } } @@ -982,7 +985,8 @@ pub(crate) mod tests { use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; use sc_client_api::{Backend as BackendT, HeaderBackend}; - use sc_network::NetworkService; + use sc_network::{ChainSyncInterface, NetworkService}; + use sc_network_common::sync::SyncEventStream; use sc_network_test::TestNetFactory; use sp_api::HeaderT; use sp_blockchain::Backend as BlockchainBackendT; @@ -1050,10 +1054,16 @@ pub(crate) mod tests { let backend = peer.client().as_backend(); let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); + let sync = peer.sync_service().clone(); let known_peers = Arc::new(Mutex::new(KnownPeers::new())); let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone())); - let gossip_engine = - GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None); + let gossip_engine = GossipEngine::new( + network.clone(), + sync.clone(), + "/beefy/1", + gossip_validator.clone(), + None, + ); let 
on_demand_justifications = OnDemandJustificationsEngine::new( network.clone(), api.clone(), @@ -1080,6 +1090,7 @@ pub(crate) mod tests { gossip_validator, metrics: None, network, + sync, on_demand_justifications, persisted_state, }; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 75a7697812c6c..2668952320c8e 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -59,7 +59,10 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; -use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; +use sc_network_common::{ + service::{NetworkBlock, NetworkSyncForkRequest}, + sync::SyncEventStream, +}; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; @@ -234,6 +237,7 @@ impl> NetworkBridge { /// service taken from the VoterSetState. 
pub(crate) fn new( service: N, + sync: Arc, config: crate::Config, set_state: crate::environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, @@ -246,6 +250,7 @@ impl> NetworkBridge { let validator = Arc::new(validator); let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), + sync.clone(), protocol, validator.clone(), prometheus_registry, diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 5d66e81f6b56c..5cf0ca705b1fb 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -33,6 +33,7 @@ use sc_network_common::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSender, NotificationSenderError, }, + sync::{SyncEvent as SyncStreamEvent, SyncEventStream}, }; use sc_network_gossip::Validator; use sc_network_test::{Block, Hash}; @@ -190,6 +191,24 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } +pub(crate) enum SyncEvent { + EventStream(TracingUnboundedSender), +} + +#[derive(Clone)] +pub(crate) struct TestSync { + sender: TracingUnboundedSender, +} + +impl SyncEventStream for TestSync { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + Box::pin(futures::stream::pending()) + } +} + pub(crate) struct Tester { pub(crate) net_handle: super::NetworkBridge, gossip_validator: Arc>, @@ -259,6 +278,8 @@ fn voter_set_state() -> SharedVoterSetState { pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; + let (stx, srx) = tracing_unbounded("sync"); + let sync = Arc::new(TestSync { sender: stx }); #[derive(Clone)] struct Exit; @@ -271,7 +292,8 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } } - let bridge = super::NetworkBridge::new(net.clone(), 
config(), voter_set_state(), None, None); + let bridge = + super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None); ( futures::future::ready(Tester { diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index a7326d57c2bf0..0e29354290aed 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -68,7 +68,7 @@ use sc_client_api::{ StorageProvider, TransactionFor, }; use sc_consensus::BlockImport; -use sc_network_common::protocol::ProtocolName; +use sc_network_common::{protocol::ProtocolName, sync::SyncEventStream}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; @@ -673,6 +673,8 @@ pub struct GrandpaParams { /// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed /// to the configuration of the networking. See [`grandpa_peers_set_config`]. pub network: N, + /// Event stream for syncing-related events. + pub sync: Arc, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. 
@@ -724,6 +726,7 @@ where mut config, link, network, + sync, voting_rule, prometheus_registry, shared_voter_state, @@ -748,6 +751,7 @@ where let network = NetworkBridge::new( network, + sync, config.clone(), persistent_data.set_state.clone(), prometheus_registry.as_ref(), diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 9bcb03c0555c2..a8ac07c5feb11 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -28,6 +28,7 @@ use futures::prelude::*; use log::{debug, info, warn}; use sc_client_api::backend::Backend; +use sc_network_common::sync::SyncEventStream; use sc_telemetry::TelemetryHandle; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -167,6 +168,7 @@ pub fn run_grandpa_observer( config: Config, link: LinkHalf, network: N, + sync: Arc, ) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, @@ -186,6 +188,7 @@ where let network = NetworkBridge::new( network, + sync, config.clone(), persistent_data.set_state.clone(), None, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 93d20110ff5af..c427289728074 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -247,6 +247,7 @@ fn initialize_grandpa( net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); (net.peers[peer_id].network_service().clone(), link) }; + let sync = net.peers[peer_id].sync_service().clone(); let grandpa_params = GrandpaParams { config: Config { @@ -261,6 +262,7 @@ fn initialize_grandpa( }, link, network: net_service, + sync, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -393,6 +395,7 @@ fn finalize_3_voters_1_full_observer() { runtime.spawn({ let peer_id = 3; let net_service = net.peers[peer_id].network_service().clone(); + let sync = net.peers[peer_id].sync_service().clone(); let link = 
net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); let grandpa_params = GrandpaParams { @@ -408,6 +411,7 @@ fn finalize_3_voters_1_full_observer() { }, link, network: net_service, + sync, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -477,11 +481,15 @@ fn transition_3_voters_twice_1_full_observer() { for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { let keystore = create_keystore(local_key); - let (net_service, link) = { + let (net_service, link, sync) = { let net = net.lock(); let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - (net.peers[peer_id].network_service().clone(), link) + ( + net.peers[peer_id].network_service().clone(), + link, + net.peers[peer_id].sync_service().clone(), + ) }; let grandpa_params = GrandpaParams { @@ -497,6 +505,7 @@ fn transition_3_voters_twice_1_full_observer() { }, link, network: net_service, + sync, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -943,6 +952,7 @@ fn voter_persists_its_votes() { communication::NetworkBridge::new( net.peers[1].network_service().clone(), + net.peers[1].sync_service().clone(), config.clone(), set_state, None, @@ -960,6 +970,7 @@ fn voter_persists_its_votes() { let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); (net.peers[0].network_service().clone(), link) }; + let sync = net.peers[0].sync_service().clone(); let grandpa_params = GrandpaParams { config: Config { @@ -974,6 +985,7 @@ fn voter_persists_its_votes() { }, link, network: net_service, + sync, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -994,6 +1006,7 @@ fn voter_persists_its_votes() { // the network service of this new peer net.add_authority_peer(); let net_service = net.peers[2].network_service().clone(); + let sync = 
net.peers[2].sync_service().clone(); // but we'll reuse the client from the first peer (alice_voter1) // since we want to share the same database, so that we can // read the persisted state after aborting alice_voter1. @@ -1015,6 +1028,7 @@ fn voter_persists_its_votes() { }, link, network: net_service, + sync, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1179,6 +1193,7 @@ fn finalize_3_voters_1_light_observer() { }, net.peers[3].data.lock().take().expect("link initialized at startup; qed"), net.peers[3].network_service().clone(), + net.peers[3].sync_service().clone(), ) .unwrap(); net.peer(0).push_blocks(20, false); @@ -1213,6 +1228,7 @@ fn voter_catches_up_to_latest_round_when_behind() { link, net: Arc>| -> Pin + Send>> { + let mut net = net.lock(); let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -1225,7 +1241,8 @@ fn voter_catches_up_to_latest_round_when_behind() { protocol_name: grandpa_protocol_name::NAME.into(), }, link, - network: net.lock().peer(peer_id).network_service().clone(), + network: net.peer(peer_id).network_service().clone(), + sync: net.peer(peer_id).sync_service().clone(), voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1321,6 +1338,7 @@ fn test_environment( link: &TestLinkHalf, keystore: Option, network_service: N, + sync_service: Arc, voting_rule: VR, ) -> TestEnvironment where @@ -1340,8 +1358,14 @@ where protocol_name: grandpa_protocol_name::NAME.into(), }; - let network = - NetworkBridge::new(network_service.clone(), config.clone(), set_state.clone(), None, None); + let network = NetworkBridge::new( + network_service.clone(), + sync_service, + config.clone(), + set_state.clone(), + None, + None, + ); Environment { authority_set: authority_set.clone(), @@ -1370,19 +1394,22 @@ fn grandpa_environment_respects_voting_rules() { let mut net = 
GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); let link = peer.data.lock().take().unwrap(); // add 21 blocks peer.push_blocks(21, false); // create an environment with no voting rule restrictions - let unrestricted_env = test_environment(&link, None, network_service.clone(), ()); + let unrestricted_env = + test_environment(&link, None, network_service.clone(), sync_service.clone(), ()); // another with 3/4 unfinalized chain voting rule restriction let three_quarters_env = test_environment( &link, None, network_service.clone(), + sync_service.clone(), voting_rule::ThreeQuartersOfTheUnfinalizedChain, ); @@ -1392,6 +1419,7 @@ fn grandpa_environment_respects_voting_rules() { &link, None, network_service.clone(), + sync_service, VotingRulesBuilder::default().build(), ); @@ -1479,10 +1507,12 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(peers[0]); - let environment = test_environment(&link, Some(keystore), network_service.clone(), ()); + let environment = + test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()); let round_state = || finality_grandpa::round::State::genesis(Default::default()); let base = || Default::default(); @@ -1680,9 +1710,10 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(alice); - test_environment(&link, Some(keystore), 
network_service.clone(), ()) + test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()) }; let signed_prevote = { diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index c716f39c61d4f..3b7bd4e2464c0 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,7 +21,10 @@ use crate::{ Network, Validator, }; -use sc_network_common::protocol::{event::Event, ProtocolName}; +use sc_network_common::{ + protocol::{event::Event, ProtocolName}, + sync::{SyncEvent, SyncEventStream}, +}; use sc_peerset::ReputationChange; use futures::{ @@ -49,6 +52,8 @@ pub struct GossipEngine { /// Incoming events from the network. network_event_stream: Pin + Send>>, + /// Incoming events from the syncing service. + sync_event_stream: Pin + Send>>, /// Outgoing events to the consumer. message_sinks: HashMap>>, /// Buffered messages (see [`ForwardingState`]). @@ -77,6 +82,7 @@ impl GossipEngine { /// Create a new instance. pub fn new + Send + Clone + 'static>( network: N, + sync: Arc, protocol: impl Into, validator: Arc>, metrics_registry: Option<&Registry>, @@ -86,6 +92,7 @@ impl GossipEngine { { let protocol = protocol.into(); let network_event_stream = network.event_stream("network-gossip"); + let sync_event_stream = sync.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), @@ -94,6 +101,7 @@ impl GossipEngine { protocol, network_event_stream, + sync_event_stream, message_sinks: HashMap::new(), forwarding_state: ForwardingState::Idle, @@ -175,28 +183,25 @@ impl Future for GossipEngine { 'outer: loop { match &mut this.forwarding_state { ForwardingState::Idle => { - match this.network_event_stream.poll_next_unpin(cx) { + // TODO(aaro): can this be refactored? 
+ let net_event_stream = this.network_event_stream.poll_next_unpin(cx); + let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx); + + if net_event_stream.is_pending() && sync_event_stream.is_pending() { + break + } + + match net_event_stream { Poll::Ready(Some(event)) => match event { - Event::SyncConnected { remote } => { - this.network.add_set_reserved(remote, this.protocol.clone()); - }, - Event::SyncDisconnected { remote } => { - this.network.remove_peers_from_reserved_set( - this.protocol.clone(), - vec![remote], - ); - }, - Event::NotificationStreamOpened { remote, protocol, role, .. } => { - if protocol != this.protocol { - continue - } - this.state_machine.new_peer(&mut *this.network, remote, role); - }, + Event::NotificationStreamOpened { remote, protocol, role, .. } => + if protocol == this.protocol { + this.state_machine.new_peer(&mut *this.network, remote, role); + }, Event::NotificationStreamClosed { remote, protocol } => { - if protocol != this.protocol { - continue + if protocol == this.protocol { + this.state_machine + .peer_disconnected(&mut *this.network, remote); } - this.state_machine.peer_disconnected(&mut *this.network, remote); }, Event::NotificationsReceived { remote, messages } => { let messages = messages @@ -225,7 +230,30 @@ impl Future for GossipEngine { self.is_terminated = true; return Poll::Ready(()) }, - Poll::Pending => break, + Poll::Pending => {}, + } + + // TODO(aaro): this is not correct + match sync_event_stream { + Poll::Ready(Some(event)) => match event { + SyncEvent::PeerConnected(remote) => { + println!("bridge: {remote:?} connected"); + this.network.add_set_reserved(remote, this.protocol.clone()); + }, + SyncEvent::PeerDisconnected(remote) => { + println!("bridge: {remote:?} disconnected"); + this.network.remove_peers_from_reserved_set( + this.protocol.clone(), + vec![remote], + ); + }, + }, + // The sync event stream closed. Do the same for [`GossipValidator`]. 
+ Poll::Ready(None) => { + self.is_terminated = true; + return Poll::Ready(()) + }, + Poll::Pending => {}, } }, ForwardingState::Busy(to_forward) => { @@ -424,7 +452,7 @@ mod tests { impl NetworkNotification for TestNetwork { fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { - unimplemented!(); + // TODO(aaro): why this must be disabled } fn notification_sender( @@ -454,6 +482,28 @@ mod tests { } } + #[derive(Clone, Default)] + struct TestSync { + inner: Arc>, + } + + #[derive(Clone, Default)] + struct TestSyncInner { + event_senders: Vec>, + } + + impl SyncEventStream for TestSync { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + let (tx, rx) = unbounded(); + self.inner.lock().unwrap().event_senders.push(tx); + + Box::pin(rx) + } + } + struct AllowAll; impl Validator for AllowAll { fn validate( @@ -473,8 +523,10 @@ mod tests { #[test] fn returns_when_network_event_stream_closes() { let network = TestNetwork::default(); + let sync = Arc::new(TestSync::default()); let mut gossip_engine = GossipEngine::::new( network.clone(), + sync, "/my_protocol", Arc::new(AllowAll {}), None, @@ -500,9 +552,11 @@ mod tests { let protocol = ProtocolName::from("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); + let sync = Arc::new(TestSync::default()); let mut gossip_engine = GossipEngine::::new( network.clone(), + sync.clone(), protocol.clone(), Arc::new(AllowAll {}), None, @@ -617,6 +671,7 @@ mod tests { let protocol = ProtocolName::from("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); + let sync = Arc::new(TestSync::default()); let num_channels_per_topic = channels.iter().fold( HashMap::new(), @@ -643,6 +698,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), + sync.clone(), protocol.clone(), Arc::new(TestValidator {}), None, @@ -669,6 +725,7 @@ mod tests { } let mut event_sender = 
network.inner.lock().unwrap().event_senders.pop().unwrap(); + let mut _syncevent_sender = sync.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. event_sender diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index bf4a89c70b88c..fd6cd4814ff7d 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -31,6 +31,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } serde = { version = "1.0.136", features = ["derive"] } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index 236913df1b120..f3d5ea1a3c2b0 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -48,18 +48,6 @@ pub enum Event { /// Event generated by a DHT. Dht(DhtEvent), - /// Now connected to a new peer for syncing purposes. - SyncConnected { - /// Node we are now syncing from. - remote: PeerId, - }, - - /// Now disconnected from a peer for syncing purposes. - SyncDisconnected { - /// Node we are no longer syncing from. - remote: PeerId, - }, - /// Opened a substream with the given node with the given notifications protocol. /// /// The protocol is always one of the notification protocols that have been registered. 
diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index b464484fe2ad9..2cab67c6215c8 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -22,14 +22,12 @@ use crate::{ config::MultiaddrWithPeerId, protocol::{event::Event, ProtocolName}, request_responses::{IfDisconnected, RequestFailure}, - sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, }; use futures::{channel::oneshot, Stream}; pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; use libp2p::{Multiaddr, PeerId}; use sc_peerset::ReputationChange; pub use signature::Signature; -use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; mod signature; diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 052dd55e5faad..928e161b08630 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -286,6 +286,33 @@ where } } +/// Syncing-related events that other protocols can subscribe to. +pub enum SyncEvent { + /// Peer that the syncing implementation is tracking connected. + PeerConnected(PeerId), + + /// Peer that the syncing implementation was tracking disconnected. + PeerDisconnected(PeerId), +} + +use futures::Stream; +use std::pin::Pin; + +pub trait SyncEventStream: Send + Sync { + /// Subscribe to syncing-related events. + fn event_stream(&self, name: &'static str) -> Pin + Send>>; +} + +impl SyncEventStream for Arc +where + T: ?Sized, + T: SyncEventStream, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + T::event_stream(self, name) + } +} + /// Something that represents the syncing strategy to download past and future blocks of the chain. pub trait ChainSync: Send { /// Returns the state of the sync of the given peer. 
diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 3a977edbca574..6e6efe7544516 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -151,12 +151,6 @@ pub enum BehaviourOut { messages: Vec<(ProtocolName, Bytes)>, }, - /// Now connected to a new peer for syncing purposes. - SyncConnected(PeerId), - - /// No longer connected to a peer for syncing purposes. - SyncDisconnected(PeerId), - /// We have obtained identity information from a peer, including the addresses it is listening /// on. PeerIdentify { @@ -321,9 +315,6 @@ impl From> for BehaviourOut { CustomMessageOutcome::NotificationsReceived { remote, messages } => BehaviourOut::NotificationsReceived { remote, messages }, CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, - CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id), - CustomMessageOutcome::SyncDisconnected(peer_id) => - BehaviourOut::SyncDisconnected(peer_id), CustomMessageOutcome::None => BehaviourOut::None, } } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index a61c2ed7b46a7..6de9fd8d74d02 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -272,7 +272,7 @@ pub use sc_network_common::{ }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - ExtendedPeerInfo, StateDownloadProgress, SyncState, SyncStatusProvider, + ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, }, }; pub use service::{ @@ -302,6 +302,7 @@ pub trait ChainSyncInterface: + Link + NetworkBlock> + SyncStatusProvider + + SyncEventStream + Send + Sync + 'static @@ -314,6 +315,7 @@ impl ChainSyncInterface for T where + Link + NetworkBlock> + SyncStatusProvider + + SyncEventStream + Send + Sync + 'static diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c54022a35086e..32da3e447cf4e 100644 --- a/client/network/src/protocol.rs +++ 
b/client/network/src/protocol.rs @@ -464,21 +464,12 @@ pub enum CustomMessageOutcome { notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { - remote: PeerId, - protocol: ProtocolName, - }, + NotificationStreamClosed { remote: PeerId, protocol: ProtocolName }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { - remote: PeerId, - messages: Vec<(ProtocolName, Bytes)>, - }, + NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> }, /// Peer has a reported a new head of chain. PeerNewBest(PeerId, NumberFor), /// Now connected to a new peer for syncing purposes. - SyncConnected(PeerId), - /// No longer connected to a peer for syncing purposes. - SyncDisconnected(PeerId), None, } @@ -608,17 +599,14 @@ where let roles = handshake.roles; if self.engine.on_sync_peer_connected(peer_id, handshake).is_ok() { - self.pending_messages.push_back( - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)] - .clone(), - negotiated_fallback, - roles, - notifications_sink, - }, - ); - CustomMessageOutcome::SyncConnected(peer_id) + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)] + .clone(), + negotiated_fallback, + roles, + notifications_sink, + } } else { CustomMessageOutcome::None } @@ -644,18 +632,15 @@ where .on_sync_peer_connected(peer_id, handshake) .is_ok() { - self.pending_messages.push_back( - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id)] - .clone(), - negotiated_fallback, - roles, - notifications_sink, - }, - ); - CustomMessageOutcome::SyncConnected(peer_id) + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id)] + 
.clone(), + negotiated_fallback, + roles, + notifications_sink, + } } else { CustomMessageOutcome::None } @@ -726,16 +711,15 @@ where NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { - if self.engine.on_sync_peer_disconnected(peer_id).is_ok() { - CustomMessageOutcome::SyncDisconnected(peer_id) - } else { + if self.engine.on_sync_peer_disconnected(peer_id).is_err() { log::trace!( target: "sync", "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", peer_id ); - CustomMessageOutcome::None } + + CustomMessageOutcome::None } else if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { // The substream that has just been closed had been opened with a bad // handshake. The outer layers have never received an opening event about this diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 4b8083d299f79..72de711e04a15 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1627,12 +1627,6 @@ where } this.event_streams.send(Event::NotificationsReceived { remote, messages }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote))) => { - this.event_streams.send(Event::SyncConnected { remote }); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncDisconnected(remote))) => { - this.event_streams.send(Event::SyncDisconnected { remote }); - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => { if let Some(metrics) = this.metrics.as_ref() { let query_type = match event { diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 4144d7f19551e..c8f3e1b1a7a3f 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -237,16 +237,6 @@ impl Metrics { Event::Dht(_) => { self.events_total.with_label_values(&["dht", "sent", name]).inc_by(num); 
}, - Event::SyncConnected { .. } => { - self.events_total - .with_label_values(&["sync-connected", "sent", name]) - .inc_by(num); - }, - Event::SyncDisconnected { .. } => { - self.events_total - .with_label_values(&["sync-disconnected", "sent", name]) - .inc_by(num); - }, Event::NotificationStreamOpened { protocol, .. } => { format_label("notif-open-", protocol, |protocol_label| { self.events_total @@ -280,14 +270,6 @@ impl Metrics { Event::Dht(_) => { self.events_total.with_label_values(&["dht", "received", name]).inc(); }, - Event::SyncConnected { .. } => { - self.events_total.with_label_values(&["sync-connected", "received", name]).inc(); - }, - Event::SyncDisconnected { .. } => { - self.events_total - .with_label_values(&["sync-disconnected", "received", name]) - .inc(); - }, Event::NotificationStreamOpened { protocol, .. } => { format_label("notif-open-", protocol, |protocol_label| { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 516e80bfc6390..871a0b7f23521 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -205,10 +205,6 @@ fn notifications_state_consistent() { }, // Add new events here. - future::Either::Left(Event::SyncConnected { .. }) => {}, - future::Either::Right(Event::SyncConnected { .. }) => {}, - future::Either::Left(Event::SyncDisconnected { .. }) => {}, - future::Either::Right(Event::SyncDisconnected { .. 
}) => {}, future::Either::Left(Event::Dht(_)) => {}, future::Either::Right(Event::Dht(_)) => {}, }; diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 5269bbbfbb78a..33116288b9032 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -38,11 +38,12 @@ use sc_network_common::{ BlockAnnounce, BlockAnnouncesHandshake, BlockState, }, warp::WarpSyncProvider, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncMode, + BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent, + SyncMode, }, utils::LruHashSet, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::HeaderMetadata; use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::{ @@ -146,6 +147,9 @@ pub struct SyncingEngine { /// Genesis hash. genesis_hash: B::Hash, + /// Set of channels for other protocols that have subscribed to syncing events. + event_streams: Vec>, + /// All connected peers. Contains both full and light node peers. 
pub peers: HashMap>, @@ -255,6 +259,7 @@ where default_peers_set_no_slot_peers, default_peers_set_num_full, default_peers_set_num_light, + event_streams: Vec::new(), metrics: if let Some(r) = metrics_registry { match Metrics::register(r) { Ok(metrics) => Some(metrics), @@ -488,6 +493,7 @@ where ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.chain_sync.set_sync_fork_request(peers, &hash, number); }, + ToServiceCommand::EventStream(tx) => self.event_streams.push(tx), ToServiceCommand::RequestJustification(hash, number) => self.chain_sync.request_justification(&hash, number), ToServiceCommand::ClearJustificationRequests => @@ -540,6 +546,8 @@ where if let Some(_peer_data) = self.peers.remove(&peer) { self.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); + self.event_streams + .retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer)).is_ok()); Ok(()) } else { Err(()) @@ -675,6 +683,11 @@ where self.chain_sync.send_block_request(who, req); } + self.event_streams.retain(|stream| { + println!("sync: {who:?} connected"); + stream.unbounded_send(SyncEvent::PeerConnected(who)).is_ok() + }); + Ok(()) } } diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 3edb186f65235..ac5e937b2a16d 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -20,19 +20,20 @@ // TODO(aaro): document functions // TODO(aaro): rename this file to sync_service.rs? 
-use futures::channel::oneshot; +use futures::{channel::oneshot, Stream}; use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network_common::{ service::{NetworkBlock, NetworkSyncForkRequest}, - sync::{SyncStatus, SyncStatusProvider}, + sync::{SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider}, }; -use sc_utils::mpsc::TracingUnboundedSender; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::pin::Pin; + /// Commands send to `ChainSync` -#[derive(Debug)] pub enum ToServiceCommand { Status(oneshot::Sender>), SetSyncForkRequest(Vec, B::Hash, NumberFor), @@ -46,6 +47,7 @@ pub enum ToServiceCommand { JustificationImported(PeerId, B::Hash, NumberFor, bool), AnnounceBlock(B::Hash, Option>), NewBestBlockImported(B::Hash, NumberFor), + EventStream(TracingUnboundedSender), } /// Handle for communicating with `ChainSync` asynchronously @@ -133,6 +135,15 @@ impl Link for ChainSyncInterfaceHandle { } } +impl SyncEventStream for ChainSyncInterfaceHandle { + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + println!("sync: register {name}"); + let (tx, rx) = tracing_unbounded(name); + let _ = self.tx.unbounded_send(ToServiceCommand::EventStream(tx)); + Box::pin(rx) + } +} + impl NetworkBlock> for ChainSyncInterfaceHandle { fn announce_block(&self, hash: B::Hash, data: Option>) { let _ = self.tx.unbounded_send(ToServiceCommand::AnnounceBlock(hash, data)); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index ddfb88877b973..fe997ee8dd2ed 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -62,8 +62,10 @@ use sc_network_common::{ }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, - 
state_request_handler::StateRequestHandler, warp_request_handler, + block_request_handler::BlockRequestHandler, + service::{chain_sync::ChainSyncInterfaceHandle, network::NetworkServiceProvider}, + state_request_handler::StateRequestHandler, + warp_request_handler, }; use sc_service::client::Client; use sp_blockchain::{ @@ -235,7 +237,7 @@ pub struct Peer { select_chain: Option>, backend: Option>, network: NetworkWorker::Hash, PeersFullClient>, - chain_sync_service: Box>, + chain_sync_service: Arc>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, listen_addr: Multiaddr, @@ -509,6 +511,10 @@ where self.network.service() } + pub fn sync_service(&self) -> &Arc> { + &self.chain_sync_service + } + /// Get a reference to the network worker. pub fn network(&self) -> &NetworkWorker::Hash, PeersFullClient> { &self.network @@ -947,6 +953,7 @@ where protocol_id, fork_id, engine, + // TODO(aaro): fix arcs chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: None, block_announce_config, @@ -991,7 +998,7 @@ where block_import, verifier, network, - chain_sync_service: Box::new(chain_sync_service), + chain_sync_service: Arc::new(chain_sync_service), listen_addr, }); }); diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index 147a86d8de2ae..3e407748794ca 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -24,5 +24,6 @@ pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git 
a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 5239a94ef23f3..bb2ce9bb1a3d1 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -37,6 +37,7 @@ use sc_network_common::{ error, protocol::{event::Event, role::ObservedRole, ProtocolName}, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, + sync::{SyncEvent, SyncEventStream}, utils::{interval, LruHashSet}, ExHashT, }; @@ -164,10 +165,12 @@ impl TransactionsHandlerPrototype { >( self, service: S, + sync_service: Arc, transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let event_stream = service.event_stream("transactions-handler"); + let net_event_stream = service.event_stream("transactions-handler-net"); + let sync_event_stream = sync_service.event_stream("transactions-handler-sync"); let (to_handler, from_controller) = mpsc::unbounded(); let handler = TransactionsHandler { @@ -176,7 +179,8 @@ impl TransactionsHandlerPrototype { pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), service, - event_stream, + net_event_stream, + sync_event_stream, peers: HashMap::new(), transaction_pool, from_controller, @@ -240,7 +244,9 @@ pub struct TransactionsHandler< /// Network service to use to send messages and manage peers. service: S, /// Stream of networking events. - event_stream: Pin + Send>>, + net_event_stream: Pin + Send>>, + /// Receiver for syncing-related events. 
+ sync_event_stream: Pin + Send>>, // All connected peers peers: HashMap>, transaction_pool: Arc>, @@ -278,7 +284,7 @@ where warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); } }, - network_event = self.event_stream.next().fuse() => { + network_event = self.net_event_stream.next().fuse() => { if let Some(network_event) = network_event { self.handle_network_event(network_event).await; } else { @@ -286,6 +292,14 @@ where return; } }, + sync_event = self.sync_event_stream.next().fuse() => { + if let Some(sync_event) = sync_event { + self.handle_sync_event(sync_event); + } else { + // Syncing has seemingly closed. Closing as well. + return; + } + } message = self.from_controller.select_next_some().fuse() => { match message { ToHandler::PropagateTransaction(hash) => self.propagate_transaction(&hash), @@ -296,10 +310,9 @@ where } } - async fn handle_network_event(&mut self, event: Event) { + fn handle_sync_event(&mut self, event: SyncEvent) { match event { - Event::Dht(_) => {}, - Event::SyncConnected { remote } => { + SyncEvent::PeerConnected(remote) => { let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) .collect::(); let result = self.service.add_peers_to_reserved_set( @@ -310,13 +323,18 @@ where log::error!(target: "sync", "Add reserved peer failed: {}", err); } }, - Event::SyncDisconnected { remote } => { + SyncEvent::PeerDisconnected(remote) => { self.service.remove_peers_from_reserved_set( self.protocol_name.clone(), iter::once(remote).collect(), ); }, + } + } + async fn handle_network_event(&mut self, event: Event) { + match event { + Event::Dht(_) => {}, Event::NotificationStreamOpened { remote, protocol, role, .. 
} if protocol == self.protocol_name => { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 384d12889f8d1..b00cad9abdc65 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -977,6 +977,8 @@ where let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( network.clone(), + // TODO(aaro): wrap chainsyncinterface into an arc + Arc::new(chain_sync_service.clone()), Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), config.prometheus_config.as_ref().map(|config| &config.registry), )?; From 0b11339faa3eb3f7bdc135116d2210ca47f66dec Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Fri, 25 Nov 2022 11:38:00 +0200 Subject: [PATCH 09/30] Introduce `ChainSyncInterface` This interface provides a set of miscellaneous functions that other subsystems can use to query, for example, the syncing status. --- client/beefy/src/tests.rs | 4 + client/consensus/aura/src/lib.rs | 5 + client/consensus/babe/src/tests.rs | 4 + client/finality-grandpa/src/tests.rs | 4 + client/network/common/src/sync.rs | 36 ++++- client/network/src/behaviour.rs | 2 +- client/network/src/lib.rs | 8 +- client/network/src/protocol.rs | 72 +-------- client/network/src/service.rs | 38 +---- client/network/src/service/metrics.rs | 35 +---- client/network/sync/src/engine.rs | 145 +++++++++++++++--- client/network/sync/src/service/chain_sync.rs | 107 ++++++++++++- client/network/test/src/lib.rs | 4 +- client/service/src/builder.rs | 1 + client/service/src/lib.rs | 7 +- 15 files changed, 303 insertions(+), 169 deletions(-) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 6df47af10e336..4181011b30c8c 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -209,6 +209,10 @@ impl TestNetFactory for BeefyTestNet { &self.peers } + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } diff --git 
a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 50a02726cf56a..3ec2a26c60beb 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -764,6 +764,11 @@ mod tests { fn peers(&self) -> &Vec { &self.peers } + + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 8bef1b38b929d..6ef5f0ee41624 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -352,6 +352,10 @@ impl TestNetFactory for BabeTestNet { &self.peers } + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index c427289728074..4326d024580e7 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -146,6 +146,10 @@ impl TestNetFactory for GrandpaTestNet { &self.peers } + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 928e161b08630..41942135bbf08 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -23,6 +23,7 @@ pub mod metrics; pub mod warp; use crate::protocol::role::Roles; +use futures::{channel::oneshot, Stream}; use libp2p::PeerId; @@ -37,7 +38,7 @@ use sp_runtime::{ }; use warp::WarpSyncProgress; -use std::{any::Any, fmt, fmt::Formatter, sync::Arc, task::Poll}; +use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc, task::Poll}; /// The sync status of a peer we are trying to sync with #[derive(Debug)] @@ -295,9 +296,6 @@ pub enum SyncEvent { PeerDisconnected(PeerId), } -use futures::Stream; -use std::pin::Pin; - pub trait 
SyncEventStream: Send + Sync { /// Subscribe to syncing-related events. fn event_stream(&self, name: &'static str) -> Pin + Send>>; @@ -469,3 +467,33 @@ pub trait ChainSync: Send { /// Send block request to peer fn send_block_request(&mut self, who: PeerId, request: BlockRequest); } + +// TODO(aaro): is this needed at all? +#[async_trait::async_trait] +pub trait ChainSyncService: Send + Sync { + /// Returns the number of peers we're connected to and that are being queried. + async fn num_active_peers(&self) -> Result; + + /// Target sync block number. + async fn best_seen_block(&self) -> Result>, oneshot::Canceled>; + + /// Number of peers participating in syncing. + async fn num_sync_peers(&self) -> Result; + + /// Number of blocks in the import queue. + async fn num_queued_blocks(&self) -> Result; + + /// Number of downloaded blocks. + async fn num_downloaded_blocks(&self) -> Result; + + /// Number of active sync requests. + async fn num_sync_requests(&self) -> Result; + + /// Returns information about all the peers we are connected to after the handshake message. + async fn peers_info(&self) + -> Result)>, oneshot::Canceled>; + + /// Call this when a block has been finalized. The sync layer may have some additional + /// requesting to perform. 
+ fn on_block_finalized(&self, hash: Block::Hash, header: Block::Header); +} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 6e6efe7544516..e35d16acbc3c3 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -314,7 +314,7 @@ impl From> for BehaviourOut { BehaviourOut::NotificationStreamClosed { remote, protocol }, CustomMessageOutcome::NotificationsReceived { remote, messages } => BehaviourOut::NotificationsReceived { remote, messages }, - CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, + CustomMessageOutcome::_PeerNewBest(_peer_id, _number) => BehaviourOut::None, CustomMessageOutcome::None => BehaviourOut::None, } } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 6de9fd8d74d02..7c23faa95369f 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -272,13 +272,15 @@ pub use sc_network_common::{ }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, + ChainSyncService, ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, + SyncStatusProvider, }, }; pub use service::{ DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, }; +use sp_consensus::SyncOracle; use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sc_peerset::ReputationChange; @@ -303,6 +305,8 @@ pub trait ChainSyncInterface: + NetworkBlock> + SyncStatusProvider + SyncEventStream + + ChainSyncService + + SyncOracle + Send + Sync + 'static @@ -316,6 +320,8 @@ impl ChainSyncInterface for T where + NetworkBlock> + SyncStatusProvider + SyncEventStream + + ChainSyncService + + SyncOracle + Send + Sync + 'static diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 32da3e447cf4e..5f79b502344e9 100644 --- a/client/network/src/protocol.rs +++ 
b/client/network/src/protocol.rs @@ -29,7 +29,7 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, log, trace, warn, Level}; +use log::{debug, error, trace, warn}; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; @@ -39,20 +39,17 @@ use sc_network_common::{ protocol::{role::Roles, ProtocolName}, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake}, - BadPeer, ExtendedPeerInfo, SyncStatus, + ExtendedPeerInfo, }, - utils::{interval, LruHashSet}, + utils::interval, }; -use sc_network_sync::engine::{Peer, SyncingEngine}; -use sp_arithmetic::traits::SaturatedConversion; +use sc_network_sync::engine::SyncingEngine; use sp_blockchain::HeaderMetadata; -use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{ collections::{HashSet, VecDeque}, io, iter, - num::NonZeroUsize, pin::Pin, - sync::Arc, task::Poll, time, }; @@ -63,12 +60,6 @@ pub mod message; pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; -/// Interval at which we perform time based maintenance -const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); - -/// Maximum number of known block hashes to keep for a peer. -const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead - /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; @@ -79,33 +70,16 @@ const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0); /// superior to this value corresponds to a user-defined protocol. 
const NUM_HARDCODED_PEERSETS: usize = 1; -/// When light node connects to the full node and the full node is behind light node -/// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful -/// and disconnect to free connection slot. -const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; - mod rep { use sc_peerset::ReputationChange as Rep; - /// Reputation change when we are a light client and a peer is behind us. - pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - /// Peer has different genesis. - pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); - /// Peer role does not match (e.g. light peer connecting to another light peer). - pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); } // Lock must always be taken in order declared here. pub struct Protocol { - /// Interval at which we call `tick`. - tick_timeout: Pin + Send>>, /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, - /// Assigned roles. - roles: Roles, - genesis_hash: B::Hash, - chain: Arc, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, /// Handles opening the unique substream and sending and receiving raw messages. @@ -137,24 +111,10 @@ where /// Create a new instance. 
pub fn new( roles: Roles, - chain: Arc, network_config: &config::NetworkConfiguration, block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, engine: SyncingEngine, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { - let info = chain.info(); - - let default_peers_set_no_slot_peers = { - let mut no_slot_p: HashSet = network_config - .default_peers_set - .reserved_nodes - .iter() - .map(|reserved| reserved.peer_id) - .collect(); - no_slot_p.shrink_to_fit(); - no_slot_p - }; - let mut known_addresses = Vec::new(); let (peerset, peerset_handle) = { @@ -229,11 +189,7 @@ where }; let protocol = Self { - tick_timeout: Box::pin(interval(TICK_TIMEOUT)), pending_messages: VecDeque::new(), - roles, - chain, - genesis_hash: info.genesis_hash, peerset_handle: peerset_handle.clone(), behaviour, notification_protocols: iter::once(block_announces_protocol.notifications_protocol) @@ -282,11 +238,6 @@ where self.engine.chain_sync.num_active_peers() } - /// Current global sync state. - pub fn sync_state(&self) -> SyncStatus { - self.engine.chain_sync.status() - } - /// Target sync block number. pub fn best_seen_block(&self) -> Option> { self.engine.chain_sync.status().best_seen_block @@ -337,13 +288,6 @@ where } } - // TODO: implement block fianalized for chainsyncinterface - /// Call this when a block has been finalized. The sync layer may have some additional - /// requesting to perform. - pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.engine.chain_sync.on_block_finalized(&hash, *header.number()) - } - /// Set whether the syncing peers set is in reserved-only mode. pub fn set_reserved_only(&self, reserved_only: bool) { self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); @@ -468,7 +412,7 @@ pub enum CustomMessageOutcome { /// Messages have been received on one or more notifications protocols. 
NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> }, /// Peer has a reported a new head of chain. - PeerNewBest(PeerId, NumberFor), + _PeerNewBest(PeerId, NumberFor), /// Now connected to a new peer for syncing purposes. None, } @@ -550,10 +494,6 @@ where // poll syncing engine self.engine.poll(cx); - while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { - self.engine.report_metrics(); - } - if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 72de711e04a15..8bd3b6ee706a6 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -69,7 +69,7 @@ use sc_network_common::{ NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, - sync::{ExtendedPeerInfo, SyncStatus}, + sync::ExtendedPeerInfo, ExHashT, }; use sc_peerset::PeersetHandle; @@ -85,7 +85,7 @@ use std::{ pin::Pin, str, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, + atomic::{AtomicUsize, Ordering}, Arc, }, task::Poll, @@ -107,8 +107,6 @@ pub struct NetworkService { num_connected: Arc, /// The local external addresses. external_addresses: Arc>>, - /// Are we actively catching up with the chain? - is_major_syncing: Arc, /// Local copy of the `PeerId` of the local node. local_peer_id: PeerId, /// The `KeyPair` that defines the `PeerId` of the local node. @@ -229,7 +227,6 @@ where let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), - params.chain.clone(), ¶ms.network_config, params.block_announce_config, params.engine, @@ -266,7 +263,6 @@ where })?; let num_connected = Arc::new(AtomicUsize::new(0)); - let is_major_syncing = Arc::new(AtomicBool::new(false)); // Build the swarm. 
let (mut swarm, bandwidth): (Swarm>, _) = { @@ -406,7 +402,6 @@ where registry, MetricSources { bandwidth: bandwidth.clone(), - major_syncing: is_major_syncing.clone(), connected_peers: num_connected.clone(), }, )?), @@ -436,7 +431,6 @@ where bandwidth, external_addresses: external_addresses.clone(), num_connected: num_connected.clone(), - is_major_syncing: is_major_syncing.clone(), peerset: peerset_handle, local_peer_id, local_identity, @@ -452,7 +446,6 @@ where Ok(NetworkWorker { external_addresses, num_connected, - is_major_syncing, network_service: swarm, service, from_service, @@ -529,14 +522,6 @@ where &self.service } - /// You must call this when a new block is finalized by the client. - pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { - self.network_service - .behaviour_mut() - .user_protocol_mut() - .on_block_finalized(hash, &header); - } - /// Returns the local `PeerId`. pub fn local_peer_id(&self) -> &PeerId { Swarm::>::local_peer_id(&self.network_service) @@ -718,11 +703,11 @@ impl NetworkService { impl sp_consensus::SyncOracle for NetworkService { fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) + self.chain_sync_service.is_major_syncing() } fn is_offline(&self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + self.chain_sync_service.is_offline() } } @@ -1244,8 +1229,6 @@ where external_addresses: Arc>>, /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. num_connected: Arc, - /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. - is_major_syncing: Arc, /// The network service that can be extracted and shared through the codebase. service: Arc>, /// The *actual* network. @@ -1840,10 +1823,9 @@ where }; } + // Update the variables shared with the `NetworkService`. let num_connected_peers = this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); - - // Update the variables shared with the `NetworkService`. 
this.num_connected.store(num_connected_peers, Ordering::Relaxed); { let external_addresses = @@ -1854,16 +1836,6 @@ where *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = this - .network_service - .behaviour_mut() - .user_protocol_mut() - .sync_state() - .state - .is_major_syncing(); - - this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); - if let Some(metrics) = this.metrics.as_ref() { if let Some(buckets) = this.network_service.behaviour_mut().num_entries_per_kbucket() { for (lower_ilog2_bucket_bound, num_entries) in buckets { diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index a099bba716eb9..5dcadac95225a 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -24,7 +24,7 @@ use prometheus_endpoint::{ use std::{ str, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, + atomic::{AtomicUsize, Ordering}, Arc, }, }; @@ -34,7 +34,6 @@ pub use prometheus_endpoint::{Histogram, HistogramVec}; /// Registers all networking metrics with the given registry. pub fn register(registry: &Registry, sources: MetricSources) -> Result { BandwidthCounters::register(registry, sources.bandwidth)?; - MajorSyncingGauge::register(registry, sources.major_syncing)?; NumConnectedGauge::register(registry, sources.connected_peers)?; Metrics::register(registry) } @@ -42,7 +41,6 @@ pub fn register(registry: &Registry, sources: MetricSources) -> Result, - pub major_syncing: Arc, pub connected_peers: Arc, } @@ -266,37 +264,6 @@ impl MetricSource for BandwidthCounters { } } -/// The "major syncing" metric. -#[derive(Clone)] -pub struct MajorSyncingGauge(Arc); - -impl MajorSyncingGauge { - /// Registers the `MajorSyncGauge` metric whose value is - /// obtained from the given `AtomicBool`. 
- fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { - prometheus::register( - SourcedGauge::new( - &Opts::new( - "substrate_sub_libp2p_is_major_syncing", - "Whether the node is performing a major sync or not.", - ), - MajorSyncingGauge(value), - )?, - registry, - )?; - - Ok(()) - } -} - -impl MetricSource for MajorSyncingGauge { - type N = u64; - - fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { - set(&[], self.0.load(Ordering::Relaxed) as u64); - } -} - /// The connected peers metric. #[derive(Clone)] pub struct NumConnectedGauge(Arc); diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 33116288b9032..ece397679a338 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -21,10 +21,12 @@ use crate::{ ChainSync, ChainSyncInterfaceHandle, ClientError, }; -use futures::StreamExt; +use futures::{Stream, StreamExt}; use libp2p::PeerId; use lru::LruCache; -use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; +use prometheus_endpoint::{ + register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, +}; use codec::Encode; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; @@ -41,7 +43,7 @@ use sc_network_common::{ BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent, SyncMode, }, - utils::LruHashSet, + utils::{interval, LruHashSet}, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::HeaderMetadata; @@ -55,10 +57,17 @@ use sp_runtime::{ use std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, - sync::Arc, + pin::Pin, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, task::Poll, }; +/// Interval at which we perform time based maintenance +const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100); + /// When light node connects to the full 
node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. @@ -71,8 +80,8 @@ mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// We received a message that failed to decode. - pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + // /// We received a message that failed to decode. + // pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer role does not match (e.g. light peer connecting to another light peer). @@ -82,16 +91,17 @@ mod rep { } struct Metrics { - _peers: Gauge, + peers: Gauge, queued_blocks: Gauge, fork_targets: Gauge, justifications: GaugeVec, } impl Metrics { - fn register(r: &Registry) -> Result { + fn register(r: &Registry, major_syncing: Arc) -> Result { + let _ = MajorSyncingGauge::register(r, major_syncing)?; Ok(Self { - _peers: { + peers: { let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?; register(g, r)? }, @@ -118,6 +128,37 @@ impl Metrics { } } +/// The "major syncing" metric. +#[derive(Clone)] +pub struct MajorSyncingGauge(Arc); + +impl MajorSyncingGauge { + /// Registers the `MajorSyncingGauge` metric whose value is + /// obtained from the given `AtomicBool`.
+ fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { + prometheus_endpoint::register( + SourcedGauge::new( + &Opts::new( + "substrate_sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + ), + MajorSyncingGauge(value), + )?, + registry, + )?; + + Ok(()) + } +} + +impl MetricSource for MajorSyncingGauge { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[], self.0.load(Ordering::Relaxed) as u64); + } +} + /// Peer information #[derive(Debug)] pub struct Peer { @@ -135,6 +176,12 @@ pub struct SyncingEngine { /// Blockchain client. client: Arc, + /// Number of peers we're connected to. + num_connected: Arc, + + /// Are we actively catching up with the chain? + is_major_syncing: Arc, + /// Network service. network_service: service::network::NetworkServiceHandle, @@ -150,6 +197,9 @@ pub struct SyncingEngine { /// Set of channels for other protocols that have subscribed to syncing events. event_streams: Vec>, + /// Interval at which we call `tick`. + tick_timeout: Pin + Send>>, + /// All connected peers. Contains both full and light node peers. 
pub peers: HashMap>, @@ -236,6 +286,8 @@ where let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); + let num_connected = Arc::new(AtomicUsize::new(0)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); let genesis_hash = client .block_hash(0u32.into()) .ok() @@ -251,6 +303,8 @@ where peers: HashMap::new(), block_announce_data_cache: LruCache::new(cache_capacity), block_announce_protocol_name, + num_connected: num_connected.clone(), + is_major_syncing: is_major_syncing.clone(), service_rx, genesis_hash, important_peers, @@ -260,8 +314,9 @@ where default_peers_set_num_full, default_peers_set_num_light, event_streams: Vec::new(), + tick_timeout: Box::pin(interval(TICK_TIMEOUT)), metrics: if let Some(r) = metrics_registry { - match Metrics::register(r) { + match Metrics::register(r, is_major_syncing.clone()) { Ok(metrics) => Some(metrics), Err(err) => { log::error!(target: "sync", "Failed to register metrics {err:?}"); @@ -272,7 +327,7 @@ where None }, }, - ChainSyncInterfaceHandle::new(tx), + ChainSyncInterfaceHandle::new(tx, num_connected, is_major_syncing), block_announce_config, )) } @@ -280,9 +335,8 @@ where /// Report Prometheus metrics. pub fn report_metrics(&self) { if let Some(metrics) = &self.metrics { - // TODO(aaro): fix - // let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); - // metrics.peers.set(n); + let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); + metrics.peers.set(n); let m = self.chain_sync.metrics(); @@ -317,7 +371,7 @@ where } } - // TODO: emit peernewbest event? + // TODO(aaro): emit peernewbest event? /// Process the result of the block announce validation. 
pub fn process_block_announce_validation_result( &mut self, @@ -482,14 +536,17 @@ where ) } - // TODO(aaro): reorder match properly pub fn poll(&mut self, cx: &mut std::task::Context) { + self.num_connected.store(self.peers.len(), Ordering::Relaxed); + self.is_major_syncing + .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); + + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { + self.report_metrics(); + } + while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { match event { - ToServiceCommand::Status(tx) => - if let Err(_) = tx.send(self.chain_sync.status()) { - log::warn!(target: "sync", "Failed to respond to `Status` query"); - }, ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.chain_sync.set_sync_fork_request(peers, &hash, number); }, @@ -525,6 +582,54 @@ where ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), ToServiceCommand::NewBestBlockImported(hash, number) => self.new_best_block_imported(hash, number), + ToServiceCommand::Status(tx) => + if let Err(_) = tx.send(self.chain_sync.status()) { + log::warn!(target: "sync", "Failed to respond to `Status` query"); + }, + ToServiceCommand::NumActivePeers(tx) => { + if let Err(_) = tx.send(self.chain_sync.num_active_peers()) { + log::warn!(target: "sync", "response channel closed for `NumActivePeers`"); + } + }, + ToServiceCommand::SyncState(tx) => { + if let Err(_) = tx.send(self.chain_sync.status()) { + log::warn!(target: "sync", "response channel closed for `SyncState`"); + } + }, + ToServiceCommand::BestSeenBlock(tx) => { + if let Err(_) = tx.send(self.chain_sync.status().best_seen_block) { + log::warn!(target: "sync", "response channel closed for `BestSeenBlock`"); + } + }, + ToServiceCommand::NumSyncPeers(tx) => { + if let Err(_) = tx.send(self.chain_sync.status().num_peers) { + log::warn!(target: "sync", "response channel closed for `NumSyncPeers`"); + } + }, + 
ToServiceCommand::NumQueuedBlocks(tx) => { + if let Err(_) = tx.send(self.chain_sync.status().queued_blocks) { + log::warn!(target: "sync", "response channel closed for `NumQueuedBlocks`"); + } + }, + ToServiceCommand::NumDownloadedBlocks(tx) => { + if let Err(_) = tx.send(self.chain_sync.num_downloaded_blocks()) { + log::warn!(target: "sync", "response channel closed for `NumDownloadedBlocks`"); + } + }, + ToServiceCommand::NumSyncRequests(tx) => { + if let Err(_) = tx.send(self.chain_sync.num_sync_requests()) { + log::warn!(target: "sync", "response channel closed for `NumSyncRequests`"); + } + }, + ToServiceCommand::PeersInfo(tx) => { + let peers_info = + self.peers.iter().map(|(id, peer)| (*id, peer.info.clone())).collect(); + if let Err(_) = tx.send(peers_info) { + log::warn!(target: "sync", "response channel closed for `PeersInfo`"); + } + }, + ToServiceCommand::OnBlockFinalized(hash, header) => + self.chain_sync.on_block_finalized(&hash, *header.number()), } } diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index ac5e937b2a16d..6aa1617cbb2c6 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -21,21 +21,29 @@ // TODO(aaro): rename this file to sync_service.rs? 
use futures::{channel::oneshot, Stream}; - use libp2p::PeerId; + use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network_common::{ service::{NetworkBlock, NetworkSyncForkRequest}, - sync::{SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider}, + sync::{ + ChainSyncService, ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, + SyncStatusProvider, + }, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::pin::Pin; +use std::{ + pin::Pin, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; /// Commands send to `ChainSync` pub enum ToServiceCommand { - Status(oneshot::Sender>), SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, @@ -48,18 +56,36 @@ pub enum ToServiceCommand { AnnounceBlock(B::Hash, Option>), NewBestBlockImported(B::Hash, NumberFor), EventStream(TracingUnboundedSender), + Status(oneshot::Sender>), + NumActivePeers(oneshot::Sender), + SyncState(oneshot::Sender>), + BestSeenBlock(oneshot::Sender>>), + NumSyncPeers(oneshot::Sender), + NumQueuedBlocks(oneshot::Sender), + NumDownloadedBlocks(oneshot::Sender), + NumSyncRequests(oneshot::Sender), + PeersInfo(oneshot::Sender)>>), + OnBlockFinalized(B::Hash, B::Header), } /// Handle for communicating with `ChainSync` asynchronously #[derive(Clone)] pub struct ChainSyncInterfaceHandle { tx: TracingUnboundedSender>, + /// Number of peers we're connected to. + num_connected: Arc, + /// Are we actively catching up with the chain? 
+ is_major_syncing: Arc, } impl ChainSyncInterfaceHandle { /// Create new handle - pub fn new(tx: TracingUnboundedSender>) -> Self { - Self { tx } + pub fn new( + tx: TracingUnboundedSender>, + num_connected: Arc, + is_major_syncing: Arc, + ) -> Self { + Self { tx, num_connected, is_major_syncing } } } @@ -153,3 +179,70 @@ impl NetworkBlock> for ChainSyncInterfaceHandle let _ = self.tx.unbounded_send(ToServiceCommand::NewBestBlockImported(hash, number)); } } + +// TODO(aaro): is this needed at all? +#[async_trait::async_trait] +impl ChainSyncService for ChainSyncInterfaceHandle { + async fn num_active_peers(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx)); + + rx.await + } + + async fn best_seen_block(&self) -> Result>, oneshot::Canceled> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::BestSeenBlock(tx)); + + rx.await + } + + async fn num_sync_peers(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncPeers(tx)); + + rx.await + } + + async fn num_queued_blocks(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumQueuedBlocks(tx)); + + rx.await + } + + async fn num_downloaded_blocks(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumDownloadedBlocks(tx)); + + rx.await + } + + async fn num_sync_requests(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncRequests(tx)); + + rx.await + } + + async fn peers_info(&self) -> Result)>, oneshot::Canceled> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::PeersInfo(tx)); + + rx.await + } + + fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { + let _ = self.tx.unbounded_send(ToServiceCommand::OnBlockFinalized(hash, 
header)); + } +} + +impl sp_consensus::SyncOracle for ChainSyncInterfaceHandle { + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } + + fn is_offline(&self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } +} diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index fe997ee8dd2ed..e4fddd9e4e475 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -1116,7 +1116,9 @@ where while let Poll::Ready(Some(notification)) = peer.finality_notification_stream.as_mut().poll_next(cx) { - peer.network.on_block_finalized(notification.hash, notification.header); + use sc_network::ChainSyncService; + peer.chain_sync_service + .on_block_finalized(notification.hash, notification.header); } } }); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index b00cad9abdc65..ab65e06247772 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -998,6 +998,7 @@ where network_mut, client, system_rpc_rx, + Arc::new(chain_sync_service.clone()), has_bootnodes, config.announce_block, ); diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 091b4bbe9fe5f..263189b51e650 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,7 +42,9 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; -use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; +use sc_network_common::{ + config::MultiaddrWithPeerId, service::NetworkBlock, sync::ChainSyncService, +}; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -154,6 +156,7 @@ async fn build_network_future< mut network: sc_network::NetworkWorker, client: Arc, mut rpc_rx: TracingUnboundedReceiver>, + sync_service: Arc>, 
should_have_peers: bool, announce_imported_blocks: bool, ) { @@ -190,7 +193,7 @@ async fn build_network_future< // List of blocks that the client has finalized. notification = finality_notification_stream.select_next_some() => { - network.on_block_finalized(notification.hash, notification.header); + sync_service.on_block_finalized(notification.hash, notification.header); } // Answer incoming RPC requests. From 6f4ac9844578fc51003fbd99525aff77e8794104 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sat, 26 Nov 2022 11:12:52 +0200 Subject: [PATCH 10/30] Move event stream polling to `SyncingEngine` Subscribe to `NetworkStreamEvent` and poll the incoming notifications and substream events from `SyncingEngine`. The code needs refactoring. --- client/network-gossip/src/bridge.rs | 2 +- client/network/common/src/protocol/event.rs | 23 ++++ client/network/src/behaviour.rs | 35 +++++ client/network/src/protocol.rs | 135 ++++++-------------- client/network/src/service.rs | 35 ++++- client/network/src/service/out_events.rs | 12 ++ client/network/sync/src/engine.rs | 95 +++++++++++++- client/network/transactions/src/lib.rs | 2 +- 8 files changed, 233 insertions(+), 106 deletions(-) diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 3b7bd4e2464c0..53393999afc0a 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -223,7 +223,7 @@ impl Future for GossipEngine { this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) => {}, + Event::Dht(_) | Event::UncheckedNotificationStreamOpened { .. } => {}, }, // The network event stream closed. Do the same for [`GossipValidator`]. 
Poll::Ready(None) => { diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index f3d5ea1a3c2b0..46ca7b684c854 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -69,6 +69,29 @@ pub enum Event { role: ObservedRole, }, + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol is always one of the notification protocols that have been registered. + /// + /// Protocol must validate the handshake and close the substream if the handshake is invalid. + UncheckedNotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + /// This is always equal to the value of + /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the + /// configured sets. + protocol: ProtocolName, + /// If the negotiation didn't use the main name of the protocol (the one in + /// `notifications_protocol`), then this field contains which name has actually been + /// used. + /// Always contains a value equal to the value in + /// `sc_network::config::NonDefaultSetConfig::fallback_names`. + negotiated_fallback: Option, + /// Received handshake. + received_handshake: Vec, + }, + /// Closed a substream with the given node. Always matches a corresponding previous /// `NotificationStreamOpened` message. NotificationStreamClosed { diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index e35d16acbc3c3..9abd965d19d5b 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -120,6 +120,28 @@ pub enum BehaviourOut { role: ObservedRole, }, + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol is always one of the notification protocols that have been registered. 
+ /// + /// Protocol must validate the received handshake and close the substream if the handshake is + /// invalid. + UncheckedNotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + protocol: ProtocolName, + /// If the negotiation didn't use the main name of the protocol (the one in + /// `notifications_protocol`), then this field contains which name has actually been + /// used. + /// See also [`crate::Event::NotificationStreamOpened`]. + negotiated_fallback: Option, + /// Object that permits sending notifications to the peer. + notifications_sink: NotificationsSink, + /// Received handshake. + received_handshake: Vec, + }, + /// The [`NotificationsSink`] object used to send notifications with the given peer must be /// replaced with a new one. /// @@ -305,6 +327,19 @@ impl From> for BehaviourOut { role: reported_roles_to_observed_role(roles), notifications_sink, }, + CustomMessageOutcome::UncheckedNotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + received_handshake, + notifications_sink, + } => BehaviourOut::UncheckedNotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + received_handshake, + notifications_sink, + }, CustomMessageOutcome::NotificationStreamReplaced { remote, protocol, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5f79b502344e9..7afe2fb5ba8af 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -36,7 +36,7 @@ use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_network_common::{ config::NonReservedPeerMode, error, - protocol::{role::Roles, ProtocolName}, + protocol::{event::Event, role::Roles, ProtocolName}, sync::{ message::{BlockAnnounce, BlockAnnouncesHandshake}, ExtendedPeerInfo, @@ -93,7 +93,9 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. 
/// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - // TODO: remove eventually + // TODO(aaro): remove + event_stream: Pin + Send>>, + // TODO(aaro): remove eventually engine: SyncingEngine, } @@ -114,6 +116,7 @@ where network_config: &config::NetworkConfiguration, block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, engine: SyncingEngine, + event_stream: Pin + Send>>, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let mut known_addresses = Vec::new(); @@ -197,6 +200,7 @@ where .collect(), bad_handshake_substreams: Default::default(), engine, + event_stream, }; Ok((protocol, peerset_handle, known_addresses)) @@ -401,6 +405,17 @@ pub enum CustomMessageOutcome { roles: Roles, notifications_sink: NotificationsSink, }, + /// Notification protocols have been opened with a remote. + /// + /// Protocol must validate the received handshake and close the substream if it is invalid. + UncheckedNotificationStreamOpened { + remote: PeerId, + protocol: ProtocolName, + /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. + negotiated_fallback: Option, + received_handshake: Vec, + notifications_sink: NotificationsSink, + }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, @@ -492,7 +507,7 @@ where } // poll syncing engine - self.engine.poll(cx); + self.engine.poll(cx, &mut self.event_stream); if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) @@ -530,25 +545,19 @@ where // announces substream. 
match as DecodeAll>::decode_all(&mut &received_handshake[..]) { Ok(GenericMessage::Status(handshake)) => { - let handshake = BlockAnnouncesHandshake { + let handshake = BlockAnnouncesHandshake:: { roles: handshake.roles, best_number: handshake.best_number, best_hash: handshake.best_hash, genesis_hash: handshake.genesis_hash, }; - let roles = handshake.roles; - if self.engine.on_sync_peer_connected(peer_id, handshake).is_ok() { - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)] - .clone(), - negotiated_fallback, - roles, - notifications_sink, - } - } else { - CustomMessageOutcome::None + CustomMessageOutcome::UncheckedNotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)].clone(), + negotiated_fallback, + received_handshake: handshake.encode(), + notifications_sink, } }, Ok(msg) => { @@ -561,43 +570,12 @@ where self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None }, - Err(err) => { - match as DecodeAll>::decode_all( - &mut &received_handshake[..], - ) { - Ok(handshake) => { - let roles = handshake.roles; - if self - .engine - .on_sync_peer_connected(peer_id, handshake) - .is_ok() - { - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id)] - .clone(), - negotiated_fallback, - roles, - notifications_sink, - } - } else { - CustomMessageOutcome::None - } - }, - Err(err2) => { - debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {} & {}", - peer_id, - received_handshake, - err, - err2, - ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - }, - } + Err(err) => CustomMessageOutcome::UncheckedNotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)].clone(), + negotiated_fallback, + received_handshake, + 
notifications_sink, }, } } else { @@ -636,9 +614,7 @@ where } }, NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => - if set_id == HARDCODED_PEERSETS_SYNC || - self.bad_handshake_substreams.contains(&(peer_id, set_id)) - { + if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None } else { CustomMessageOutcome::NotificationStreamReplaced { @@ -649,18 +625,7 @@ where }, // TODO(aaro): listen on event stream in `SyncingEngine` NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { - // Set number 0 is hardcoded the default set of peers we sync from. - if set_id == HARDCODED_PEERSETS_SYNC { - if self.engine.on_sync_peer_disconnected(peer_id).is_err() { - log::trace!( - target: "sync", - "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", - peer_id - ); - } - - CustomMessageOutcome::None - } else if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { + if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { // The substream that has just been closed had been opened with a bad // handshake. The outer layers have never received an opening event about this // substream, and consequently shouldn't receive a closing event either. @@ -672,42 +637,16 @@ where } } }, - NotificationsOut::Notification { peer_id, set_id, message } => match set_id { - HARDCODED_PEERSETS_SYNC if self.engine.peers.contains_key(&peer_id) => { - if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.engine.push_block_announce_validation(peer_id, announce); - - // Make sure that the newly added block announce validation future was - // polled once to be registered in the task. 
- if let Poll::Ready(res) = - self.engine.chain_sync.poll_block_announce_validation(cx) - { - self.engine.process_block_announce_validation_result(res) - } - - CustomMessageOutcome::None - } else { - warn!(target: "sub-libp2p", "Failed to decode block announce"); - CustomMessageOutcome::None - } - }, - HARDCODED_PEERSETS_SYNC => { - trace!( - target: "sync", - "Received sync for peer earlier refused by sync layer: {}", - peer_id - ); + NotificationsOut::Notification { peer_id, set_id, message } => { + if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { CustomMessageOutcome::None - }, - _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => - CustomMessageOutcome::None, - _ => { + } else { let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { remote: peer_id, messages: vec![(protocol_name, message.freeze())], } - }, + } }, }; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 8bd3b6ee706a6..6fc0ec47c27f9 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -225,11 +225,13 @@ where local_peer_id.to_base58(), ); + let (tx, rx) = out_events::channel("block-announce-protocol"); let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), ¶ms.network_config, params.block_announce_config, params.engine, + Box::pin(rx), )?; // List of multiaddresses that we know in the network. 
@@ -443,13 +445,16 @@ where _marker: PhantomData, }); + let mut event_streams = out_events::OutChannels::new(params.metrics_registry.as_ref())?; + event_streams.push(tx); + Ok(NetworkWorker { external_addresses, num_connected, network_service: swarm, service, from_service, - event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, + event_streams, peers_notifications_sinks, metrics, boot_node_ids, @@ -1539,6 +1544,34 @@ where role, }); }, + Poll::Ready(SwarmEvent::Behaviour( + BehaviourOut::UncheckedNotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + notifications_sink, + received_handshake, + }, + )) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics + .notifications_streams_opened_total + .with_label_values(&[&protocol]) + .inc(); + } + { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + let _previous_value = peers_notifications_sinks + .insert((remote, protocol.clone()), notifications_sink); + debug_assert!(_previous_value.is_none()); + } + this.event_streams.send(Event::UncheckedNotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + received_handshake, + }); + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, protocol, diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index c8f3e1b1a7a3f..848134e7a89b8 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -244,6 +244,13 @@ impl Metrics { .inc_by(num); }); }, + Event::UncheckedNotificationStreamOpened { protocol, .. } => { + format_label("notif-open-", protocol, |protocol_label| { + self.events_total + .with_label_values(&[protocol_label, "sent", name]) + .inc_by(num); + }); + }, Event::NotificationStreamClosed { protocol, .. 
} => { format_label("notif-closed-", protocol, |protocol_label| { self.events_total @@ -275,6 +282,11 @@ impl Metrics { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); }); }, + Event::UncheckedNotificationStreamOpened { protocol, .. } => { + format_label("notif-open-", protocol, |protocol_label| { + self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); + }); + }, Event::NotificationStreamClosed { protocol, .. } => { format_label("notif-closed-", protocol, |protocol_label| { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index ece397679a338..962c24ef59581 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -28,12 +28,12 @@ use prometheus_endpoint::{ register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, }; -use codec::Encode; +use codec::{Decode, DecodeAll, Encode}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network_common::{ config::{NonDefaultSetConfig, ProtocolId}, - protocol::{role::Roles, ProtocolName}, + protocol::{event::Event, role::Roles, ProtocolName}, sync::{ message::{ generic::{BlockData, BlockResponse}, @@ -80,8 +80,8 @@ mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - // /// We received a message that failed to decode. - // pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer role does not match (e.g. 
light peer connecting to another light peer). @@ -536,7 +536,11 @@ where ) } - pub fn poll(&mut self, cx: &mut std::task::Context) { + pub fn poll( + &mut self, + cx: &mut std::task::Context, + event_stream: &mut Pin + Send>>, + ) { self.num_connected.store(self.peers.len(), Ordering::Relaxed); self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); @@ -545,6 +549,87 @@ where self.report_metrics(); } + while let Poll::Ready(Some(event)) = event_stream.poll_next_unpin(cx) { + match event { + Event::UncheckedNotificationStreamOpened { + remote, + protocol, + received_handshake, + .. + } => { + if protocol != self.block_announce_protocol_name { + continue + } + + match as DecodeAll>::decode_all( + &mut &received_handshake[..], + ) { + Ok(handshake) => { + if self.on_sync_peer_connected(remote, handshake).is_err() { + log::debug!( + target: "sync", + "Failed to register peer {remote:?}: {received_handshake:?}", + ); + } + }, + Err(err) => { + log::debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {}", + remote, + received_handshake, + err, + ); + self.network_service.report_peer(remote, rep::BAD_MESSAGE); + }, + } + }, + Event::NotificationStreamClosed { remote, protocol } => { + if protocol != self.block_announce_protocol_name { + continue + } + + if self.on_sync_peer_disconnected(remote).is_err() { + log::trace!( + target: "sync", + "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", + remote + ); + } + }, + Event::NotificationsReceived { remote, messages } => { + for (protocol, message) in messages { + if protocol != self.block_announce_protocol_name { + continue + } + + if self.peers.contains_key(&remote) { + if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { + self.push_block_announce_validation(remote, announce); + + // Make sure that the newly added block announce validation future + // was polled once to be registered in the task. 
+ if let Poll::Ready(res) = + self.chain_sync.poll_block_announce_validation(cx) + { + self.process_block_announce_validation_result(res) + } + } else { + log::warn!(target: "sub-libp2p", "Failed to decode block announce"); + } + } else { + log::trace!( + target: "sync", + "Received sync for peer earlier refused by sync layer: {}", + remote + ); + } + } + }, + _ => {}, + } + } + while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { match event { ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { self.chain_sync.set_sync_fork_request(peers, &hash, number); }, diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index bb2ce9bb1a3d1..bb9e5531176ca 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -334,7 +334,7 @@ where async fn handle_network_event(&mut self, event: Event) { match event { - Event::Dht(_) => {}, + Event::Dht(_) | Event::UncheckedNotificationStreamOpened { .. } => {}, Event::NotificationStreamOpened { remote, protocol, role, .. } if protocol == self.protocol_name => { From e2ea27758b4a894bf808f10668462eb6c3ccbc0c Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sat, 26 Nov 2022 13:26:43 +0200 Subject: [PATCH 11/30] Make `SyncingEngine` into an asynchronous runner This commit removes the last hard dependency of syncing from `sc-network` meaning the protocol now lives completely outside of `sc-network`, ignoring the hardcoded peerset entry which will be addressed in the future. Code needs a lot of refactoring.
--- client/consensus/babe/src/tests.rs | 1 + client/network/src/config.rs | 5 +- client/network/src/protocol.rs | 75 +++-------- client/network/src/service.rs | 60 ++++----- client/network/src/service/tests/mod.rs | 3 +- client/network/src/service/tests/service.rs | 29 ++++- client/network/sync/src/engine.rs | 10 +- client/network/test/src/lib.rs | 134 ++++++++++++-------- client/network/test/src/sync.rs | 22 ++-- client/service/src/builder.rs | 7 +- client/service/src/lib.rs | 6 +- 11 files changed, 188 insertions(+), 164 deletions(-) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 6ef5f0ee41624..9d4c24d654363 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -353,6 +353,7 @@ impl TestNetFactory for BabeTestNet { } fn peers_mut(&mut self) -> &mut Vec { + trace!(target: "babe", "Retrieving peers, mutable"); &mut self.peers } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index c8449386933d9..82c1a4f9f9f3e 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -82,9 +82,8 @@ where /// name on the wire. pub fork_id: Option, - /// Syncing engine. - pub engine: SyncingEngine, - + // /// Syncing engine. 
+ // pub engine: SyncingEngine, /// Interface that can be used to delegate syncing-related function calls to `ChainSync` pub chain_sync_service: Box>, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 7afe2fb5ba8af..70135d58b66d4 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -95,8 +95,10 @@ pub struct Protocol { bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, // TODO(aaro): remove event_stream: Pin + Send>>, + _marker: std::marker::PhantomData, + peers: HashSet, // TODO(aaro): remove eventually - engine: SyncingEngine, + // engine: SyncingEngine, } impl Protocol @@ -115,7 +117,7 @@ where roles: Roles, network_config: &config::NetworkConfiguration, block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, - engine: SyncingEngine, + // engine: SyncingEngine, event_stream: Pin + Send>>, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let mut known_addresses = Vec::new(); @@ -199,7 +201,8 @@ where .chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone())) .collect(), bad_handshake_substreams: Default::default(), - engine, + _marker: Default::default(), + peers: HashSet::new(), event_stream, }; @@ -221,6 +224,7 @@ where if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position)); + self.peers.remove(peer_id); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } @@ -234,43 +238,7 @@ where // TODO(aaro): implement using behaviour? /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.engine.peers.len() - } - - /// Returns the number of peers we're connected to and that are being queried. - pub fn num_active_peers(&self) -> usize { - self.engine.chain_sync.num_active_peers() - } - - /// Target sync block number. 
- pub fn best_seen_block(&self) -> Option> { - self.engine.chain_sync.status().best_seen_block - } - - /// Number of peers participating in syncing. - pub fn num_sync_peers(&self) -> u32 { - self.engine.chain_sync.status().num_peers - } - - /// Number of blocks in the import queue. - pub fn num_queued_blocks(&self) -> u32 { - self.engine.chain_sync.status().queued_blocks - } - - /// Number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { - self.engine.chain_sync.num_downloaded_blocks() - } - - /// Number of active sync requests. - pub fn num_sync_requests(&self) -> usize { - self.engine.chain_sync.num_sync_requests() - } - - // TODO(aaro): implement using ChainSyncInterface - /// Returns information about all the peers we are connected to after the handshake message. - pub fn peers_info(&self) -> impl Iterator)> { - self.engine.peers.iter().map(|(id, peer)| (id, &peer.info)) + self.peers.len() } /// Adjusts the reputation of a node. @@ -506,9 +474,6 @@ where return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } - // poll syncing engine - self.engine.poll(cx, &mut self.event_stream); - if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } @@ -538,6 +503,7 @@ where notifications_sink, negotiated_fallback, } => { + self.peers.insert(peer_id); // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { // `received_handshake` can be either a `Status` message if received from the @@ -568,6 +534,7 @@ where msg, ); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + self.peers.remove(&peer_id); CustomMessageOutcome::None }, Err(err) => CustomMessageOutcome::UncheckedNotificationStreamOpened { @@ -579,10 +546,8 @@ where }, } } else { - match ( - Roles::decode_all(&mut &received_handshake[..]), - self.engine.peers.get(&peer_id), - ) { + // TODO(aaro): fix this + match (Roles::decode_all(&mut &received_handshake[..]), None::) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, protocol: self.notification_protocols[usize::from(set_id)].clone(), @@ -591,23 +556,25 @@ where notifications_sink, }, (Err(_), Some(peer)) if received_handshake.is_empty() => { + panic!("not supported anymore"); // As a convenience, we allow opening substreams for "external" // notification protocols with an empty handshake. This fetches the // roles from the locally-known roles. 
// TODO: remove this after https://github.com/paritytech/substrate/issues/5685 - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - negotiated_fallback, - roles: peer.info.roles, - notifications_sink, - } + // CustomMessageOutcome::NotificationStreamOpened { + // remote: peer_id, + // protocol: self.notification_protocols[usize::from(set_id)].clone(), + // negotiated_fallback, + // roles: peer.info.roles, + // notifications_sink, + // } }, (Err(err), _) => { debug!(target: "sync", "Failed to parse remote handshake: {}", err); self.bad_handshake_substreams.insert((peer_id, set_id)); self.behaviour.disconnect_peer(&peer_id, set_id); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + self.peers.remove(&peer_id); CustomMessageOutcome::None }, } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 6fc0ec47c27f9..a6c0011c7cf04 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -230,7 +230,7 @@ where From::from(¶ms.role), ¶ms.network_config, params.block_announce_config, - params.engine, + // params.engine, Box::pin(rx), )?; @@ -486,35 +486,35 @@ where self.network_service.behaviour().user_protocol().num_connected_peers() } - /// Returns the number of peers we're connected to and that are being queried. - pub fn num_active_peers(&self) -> usize { - self.network_service.behaviour().user_protocol().num_active_peers() - } + // /// Returns the number of peers we're connected to and that are being queried. + // pub fn num_active_peers(&self) -> usize { + // self.network_service.behaviour().user_protocol().num_active_peers() + // } - /// Target sync block number. - pub fn best_seen_block(&self) -> Option> { - self.network_service.behaviour().user_protocol().best_seen_block() - } + // /// Target sync block number. 
+ // pub fn best_seen_block(&self) -> Option> { + // self.network_service.behaviour().user_protocol().best_seen_block() + // } - /// Number of peers participating in syncing. - pub fn num_sync_peers(&self) -> u32 { - self.network_service.behaviour().user_protocol().num_sync_peers() - } + // /// Number of peers participating in syncing. + // pub fn num_sync_peers(&self) -> u32 { + // self.network_service.behaviour().user_protocol().num_sync_peers() + // } - /// Number of blocks in the import queue. - pub fn num_queued_blocks(&self) -> u32 { - self.network_service.behaviour().user_protocol().num_queued_blocks() - } + // /// Number of blocks in the import queue. + // pub fn num_queued_blocks(&self) -> u32 { + // self.network_service.behaviour().user_protocol().num_queued_blocks() + // } - /// Returns the number of downloaded blocks. - pub fn num_downloaded_blocks(&self) -> usize { - self.network_service.behaviour().user_protocol().num_downloaded_blocks() - } + // /// Returns the number of downloaded blocks. + // pub fn num_downloaded_blocks(&self) -> usize { + // self.network_service.behaviour().user_protocol().num_downloaded_blocks() + // } - /// Number of active sync requests. - pub fn num_sync_requests(&self) -> usize { - self.network_service.behaviour().user_protocol().num_sync_requests() - } + // /// Number of active sync requests. + // pub fn num_sync_requests(&self) -> usize { + // self.network_service.behaviour().user_protocol().num_sync_requests() + // } /// Adds an address for a node. pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { @@ -630,16 +630,6 @@ where } } - /// Get currently connected peers. - pub fn peers_debug_info(&mut self) -> Vec<(PeerId, ExtendedPeerInfo)> { - self.network_service - .behaviour_mut() - .user_protocol_mut() - .peers_info() - .map(|(id, info)| (*id, info.clone())) - .collect() - } - /// Removes a `PeerId` from the list of reserved peers. 
pub fn remove_reserved_peer(&self, peer: PeerId) { self.service.remove_reserved_peer(peer); diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index fa6a394e50d2f..77b78f52ce33f 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -292,7 +292,6 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - engine, chain_sync_service: Box::new(chain_sync_service), metrics_registry: None, request_response_protocol_configs: [ @@ -318,6 +317,8 @@ impl TestNetworkBuilder { async_std::task::sleep(std::time::Duration::from_millis(250)).await; } }); + let stream = worker.service().event_stream("syncing"); + async_std::task::spawn(async move { engine.run(stream).await }); TestNetwork::new(worker) } diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 871a0b7f23521..1ff930de8d73c 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -207,6 +207,9 @@ fn notifications_state_consistent() { // Add new events here. future::Either::Left(Event::Dht(_)) => {}, future::Either::Right(Event::Dht(_)) => {}, + + future::Either::Left(Event::UncheckedNotificationStreamOpened { .. }) => {}, + future::Either::Right(Event::UncheckedNotificationStreamOpened { .. }) => {}, }; } }); @@ -214,6 +217,7 @@ fn notifications_state_consistent() { #[async_std::test] async fn lots_of_incoming_peers_works() { + sp_tracing::try_init_simple(); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (main_node, _) = TestNetworkBuilder::new() @@ -247,6 +251,7 @@ async fn lots_of_incoming_peers_works() { let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); let mut event_stream = event_stream.fuse(); + let mut sync_protocol_name = None; loop { futures::select! 
{ _ = timer => { @@ -255,15 +260,22 @@ async fn lots_of_incoming_peers_works() { } ev = event_stream.next() => { match ev.unwrap() { + Event::UncheckedNotificationStreamOpened { protocol, .. } => { + if let None = sync_protocol_name { + sync_protocol_name = Some(protocol.clone()); + } + } Event::NotificationStreamOpened { remote, .. } => { assert_eq!(remote, main_node_peer_id); // Test succeeds after 5 seconds. This timer is here in order to // detect a potential problem after opening. timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); } - Event::NotificationStreamClosed { .. } => { - // Test failed. - panic!(); + Event::NotificationStreamClosed { protocol, .. } => { + if Some(protocol) != sync_protocol_name { + // Test failed. + panic!(); + } } _ => {} } @@ -288,10 +300,19 @@ fn notifications_back_pressure() { let receiver = async_std::task::spawn(async move { let mut received_notifications = 0; + let mut sync_protocol_name = None; while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { - Event::NotificationStreamClosed { .. } => panic!(), + Event::UncheckedNotificationStreamOpened { protocol, .. } => + if let None = sync_protocol_name { + sync_protocol_name = Some(protocol); + }, + Event::NotificationStreamClosed { protocol, .. } => { + if Some(protocol) != sync_protocol_name { + panic!() + } + }, Event::NotificationsReceived { messages, .. 
} => for message in messages { assert_eq!(message.0, PROTOCOL_NAME.into()); diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 962c24ef59581..c6ec78a79221f 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -536,11 +536,17 @@ where ) } + pub async fn run(mut self, mut stream: Pin + Send>>) { + loop { + futures::future::poll_fn(|cx| self.poll(cx, &mut stream)).await; + } + } + pub fn poll( &mut self, cx: &mut std::task::Context, event_stream: &mut Pin + Send>>, - ) { + ) -> Poll<()> { self.num_connected.store(self.peers.len(), Ordering::Relaxed); self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); @@ -721,6 +727,8 @@ where while let Poll::Ready(result) = self.chain_sync.poll(cx) { self.process_block_announce_validation_result(result); } + + Poll::Pending } /// Called by peer when it is disconnecting. diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index e4fddd9e4e475..f092feb1a01c1 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -50,14 +50,14 @@ use sc_consensus::{ }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - ChainSyncInterface, Multiaddr, NetworkService, NetworkWorker, + ChainSyncInterface, ChainSyncService, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ config::{ MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, }, protocol::{role::Roles, ProtocolName}, - service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, + service::{NetworkBlock, NetworkEventStream, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -271,13 +271,13 @@ where } /// Returns the number of downloaded blocks. 
- pub fn num_downloaded_blocks(&self) -> usize { - self.network.num_downloaded_blocks() + pub async fn num_downloaded_blocks(&self) -> usize { + self.chain_sync_service.num_downloaded_blocks().await.unwrap() } /// Returns true if we have no peer. pub fn is_offline(&self) -> bool { - self.num_peers() == 0 + self.chain_sync_service.is_offline() } /// Request a justification for the given block. @@ -715,13 +715,14 @@ pub struct FullPeerConfig { pub storage_chain: bool, } -pub trait TestNetFactory: Default + Sized +#[async_trait::async_trait] +pub trait TestNetFactory: Default + Sized + Send where >::Transaction: Send, { type Verifier: 'static + Verifier; type BlockImport: BlockImport + Clone + Send + Sync + 'static; - type PeerData: Default; + type PeerData: Default + Send; /// This one needs to be implemented! fn make_verifier(&self, client: PeersClient, peer_data: &Self::PeerData) -> Self::Verifier; @@ -729,6 +730,7 @@ where /// Get reference to peer. fn peer(&mut self, i: usize) -> &mut Peer; fn peers(&self) -> &Vec>; + fn peers_mut(&mut self) -> &mut Vec>; fn mut_peers>)>( &mut self, closure: F, @@ -952,7 +954,7 @@ where chain: client.clone(), protocol_id, fork_id, - engine, + // engine, // TODO(aaro): fix arcs chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: None, @@ -973,9 +975,13 @@ where async_std::task::spawn(async move { chain_sync_network_provider.run(service).await; }); - let service = Box::new(chain_sync_service.clone()); + let service = chain_sync_service.clone(); + async_std::task::spawn(async move { + import_queue.run(Box::new(service)).await; + }); + let service = network.service().clone(); async_std::task::spawn(async move { - import_queue.run(service).await; + engine.run(service.event_stream("syncing")).await; }); self.mut_peers(move |peers| { @@ -1009,60 +1015,55 @@ where async_std::task::spawn(f); } - /// Polls the testnet until all nodes are in sync. + /// Polls the testnet until all peers are connected to each other. 
/// /// Must be executed in a task context. - fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { + fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { self.poll(cx); - // Return `NotReady` if there's a mismatch in the highest block number. + let num_peers = self.peers().len(); + if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { + return Poll::Ready(()) + } + + Poll::Pending + } + + async fn is_in_sync(&mut self) -> bool { let mut highest = None; - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending + let peers = self.peers_mut(); + + for peer in peers { + if peer.chain_sync_service.is_major_syncing() || + peer.chain_sync_service.num_queued_blocks().await.unwrap() != 0 + { + return false } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending + if peer.chain_sync_service.num_sync_requests().await.unwrap() != 0 { + return false } match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Poll::Pending, + (Some(_), _) => return false, } } - Poll::Ready(()) - } - /// Polls the testnet until theres' no activiy of any kind. - /// - /// Must be executed in a task context. - fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); + true + } - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending + async fn is_idle(&mut self) -> bool { + let peers = self.peers_mut(); + for peer in peers { + if peer.chain_sync_service.num_queued_blocks().await.unwrap() != 0 { + return false } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending + if peer.chain_sync_service.num_sync_requests().await.unwrap() != 0 { + return false } } - Poll::Ready(()) - } - - /// Polls the testnet until all peers are connected to each other. 
- /// - /// Must be executed in a task context. - fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - let num_peers = self.peers().len(); - if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { - return Poll::Ready(()) - } - - Poll::Pending + true } /// Blocks the current thread until we are sync'ed. @@ -1070,10 +1071,19 @@ where /// Calls `poll_until_sync` repeatedly. /// (If we've not synced within 10 mins then panic rather than hang.) fn block_until_sync(&mut self) { - futures::executor::block_on(timeout( - Duration::from_secs(10 * 60), - futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx)), - )) + futures::executor::block_on(timeout(Duration::from_secs(10 * 60), async move { + loop { + futures::future::poll_fn::<(), _>(|cx| { + self.poll(cx); + Poll::Ready(()) + }) + .await; + + if self.is_in_sync().await { + break + } + } + })) .expect("sync didn't happen within 10 mins"); } @@ -1081,9 +1091,19 @@ where /// /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. fn block_until_idle(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { - self.poll_until_idle(cx) - })); + futures::executor::block_on(async move { + loop { + futures::future::poll_fn::<(), _>(|cx| { + self.poll(cx); + Poll::Ready(()) + }) + .await; + + if self.is_idle().await { + break + } + } + }); } /// Blocks the current thread until all peers are connected to each other. 
@@ -1158,6 +1178,10 @@ impl TestNetFactory for TestNet { &self.peers } + fn peers_mut(&mut self) -> &mut Vec> { + &mut self.peers + } + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } @@ -1205,6 +1229,10 @@ impl TestNetFactory for JustificationTestNet { self.0.peers() } + fn peers_mut(&mut self) -> &mut Vec> { + self.0.peers_mut() + } + fn mut_peers>)>( &mut self, closure: F, diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 8c6a8b14cd776..c7f345704a364 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -630,12 +630,12 @@ fn imports_stale_once() { // check that NEW block is imported from announce message let new_hash = net.peer(0).push_blocks(1, false); import_with_announce(&mut net, new_hash); - assert_eq!(net.peer(1).num_downloaded_blocks(), 1); + assert_eq!(futures::executor::block_on(net.peer(1).num_downloaded_blocks()), 1); // check that KNOWN STALE block is imported from announce message let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); import_with_announce(&mut net, known_stale_hash); - assert_eq!(net.peer(1).num_downloaded_blocks(), 2); + assert_eq!(futures::executor::block_on(net.peer(1).num_downloaded_blocks()), 2); } #[test] @@ -1007,13 +1007,19 @@ fn syncs_all_forks_from_single_peer() { let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true); // Wait till peer 1 starts downloading - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).network().best_seen_block() != Some(12) { - return Poll::Pending + block_on(async { + loop { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Poll::Ready(()) + }) + .await; + + if net.peer(1).sync_service().best_seen_block().await.unwrap() == Some(12) { + break + } } - Poll::Ready(()) - })); + }); // Peer 0 produces and announces another fork let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false); diff --git 
a/client/service/src/builder.rs b/client/service/src/builder.rs index ab65e06247772..9fe48e45e7a29 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -41,7 +41,7 @@ use sc_network::{config::SyncMode, ChainSyncInterface, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ protocol::role::Roles, - service::{NetworkStateInfo, NetworkStatusProvider}, + service::{NetworkEventStream, NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -939,7 +939,7 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - engine, + // engine, chain_sync_service: Box::new(chain_sync_service.clone()), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, @@ -991,6 +991,9 @@ where ); spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service))); + let event_stream = network.event_stream("syncing"); + spawn_handle.spawn("syncing", None, engine.run(event_stream)); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 263189b51e650..a21ad7b4dc4ab 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -201,7 +201,7 @@ async fn build_network_future< match request { sc_rpc::system::Request::Health(sender) => { let _ = sender.send(sc_rpc::system::Health { - peers: network.peers_debug_info().len(), + peers: sync_service.peers_info().await.expect("syncing to stay active").len(), is_syncing: network.service().is_major_syncing(), should_have_peers, }); @@ -218,7 +218,7 @@ async fn build_network_future< let _ = sender.send(addresses); }, sc_rpc::system::Request::Peers(sender) => { - let _ = 
sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)| + let _ = sender.send(sync_service.peers_info().await.expect("syncing to stay active").into_iter().map(|(peer_id, p)| sc_rpc::system::PeerInfo { peer_id: peer_id.to_base58(), roles: format!("{:?}", p.roles), @@ -281,7 +281,7 @@ async fn build_network_future< let _ = sender.send(SyncState { starting_block, current_block: best_number, - highest_block: network.best_seen_block().unwrap_or(best_number), + highest_block: sync_service.best_seen_block().await.expect("syncing to stay active").unwrap_or(best_number), }); } } From caf54b427a63a4e74a9fe421feac1c592575e03f Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sun, 27 Nov 2022 06:53:52 +0200 Subject: [PATCH 12/30] Fix warnings --- bin/node/cli/src/service.rs | 2 +- client/beefy/src/worker.rs | 1 - client/network/common/src/sync.rs | 1 - client/network/src/config.rs | 1 - client/network/src/protocol.rs | 31 +++++-------------- client/network/src/service.rs | 9 +----- client/network/sync/src/service/chain_sync.rs | 7 +---- client/network/test/src/lib.rs | 2 +- 8 files changed, 12 insertions(+), 42 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 6e6d1c0adde71..e1f6d1b686e85 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,7 +29,7 @@ use node_primitives::Block; use sc_client_api::BlockBackend; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::{ChainSyncInterface, NetworkService}; +use sc_network::NetworkService; use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 55f103de05879..48d176daf2d46 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -30,7 +30,6 @@ 
use parking_lot::Mutex; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_network_common::{ - protocol::event::Event as NetEvent, service::{NetworkEventStream, NetworkRequest}, sync::{SyncEvent, SyncEventStream}, }; diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 41942135bbf08..959a88078a2c8 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -468,7 +468,6 @@ pub trait ChainSync: Send { fn send_block_request(&mut self, who: PeerId, request: BlockRequest); } -// TODO(aaro): is this needed at all? #[async_trait::async_trait] pub trait ChainSyncService: Send + Sync { /// Returns the number of peers we're connected to and that are being queried. diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 82c1a4f9f9f3e..3d4103551f8f3 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -30,7 +30,6 @@ pub use sc_network_common::{ sync::warp::WarpSyncProvider, ExHashT, }; -use sc_network_sync::engine::SyncingEngine; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 70135d58b66d4..7248dfeae5c24 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -19,8 +19,7 @@ use crate::config; use bytes::Bytes; -use codec::{Decode, DecodeAll, Encode}; -use futures::prelude::*; +use codec::{DecodeAll, Encode}; use libp2p::{ core::{connection::ConnectionId, transport::ListenerId, ConnectedPoint}, swarm::{ @@ -29,29 +28,22 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, trace, warn}; +use log::{debug, error, warn}; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_network_common::{ config::NonReservedPeerMode, error, - protocol::{event::Event, 
role::Roles, ProtocolName}, - sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake}, - ExtendedPeerInfo, - }, - utils::interval, + protocol::{role::Roles, ProtocolName}, + sync::message::BlockAnnouncesHandshake, }; -use sc_network_sync::engine::SyncingEngine; use sp_blockchain::HeaderMetadata; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ collections::{HashSet, VecDeque}, io, iter, - pin::Pin, task::Poll, - time, }; mod notifications; @@ -93,12 +85,8 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - // TODO(aaro): remove - event_stream: Pin + Send>>, - _marker: std::marker::PhantomData, peers: HashSet, - // TODO(aaro): remove eventually - // engine: SyncingEngine, + _marker: std::marker::PhantomData, } impl Protocol @@ -117,8 +105,6 @@ where roles: Roles, network_config: &config::NetworkConfiguration, block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, - // engine: SyncingEngine, - event_stream: Pin + Send>>, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let mut known_addresses = Vec::new(); @@ -203,7 +189,6 @@ where bad_handshake_substreams: Default::default(), _marker: Default::default(), peers: HashSet::new(), - event_stream, }; Ok((protocol, peerset_handle, known_addresses)) @@ -537,7 +522,7 @@ where self.peers.remove(&peer_id); CustomMessageOutcome::None }, - Err(err) => CustomMessageOutcome::UncheckedNotificationStreamOpened { + Err(_err) => CustomMessageOutcome::UncheckedNotificationStreamOpened { remote: peer_id, protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, @@ -555,7 +540,7 @@ where roles, notifications_sink, }, - (Err(_), Some(peer)) if 
received_handshake.is_empty() => { + (Err(_), Some(_peer)) if received_handshake.is_empty() => { panic!("not supported anymore"); // As a convenience, we allow opening substreams for "external" // notification protocols with an empty handshake. This fetches the diff --git a/client/network/src/service.rs b/client/network/src/service.rs index a6c0011c7cf04..fea16ab2e829f 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -69,7 +69,6 @@ use sc_network_common::{ NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, - sync::ExtendedPeerInfo, ExHashT, }; use sc_peerset::PeersetHandle; @@ -225,13 +224,10 @@ where local_peer_id.to_base58(), ); - let (tx, rx) = out_events::channel("block-announce-protocol"); let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), ¶ms.network_config, params.block_announce_config, - // params.engine, - Box::pin(rx), )?; // List of multiaddresses that we know in the network. @@ -445,16 +441,13 @@ where _marker: PhantomData, }); - let mut event_streams = out_events::OutChannels::new(params.metrics_registry.as_ref())?; - event_streams.push(tx); - Ok(NetworkWorker { external_addresses, num_connected, network_service: swarm, service, from_service, - event_streams, + event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, metrics, boot_node_ids, diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 6aa1617cbb2c6..c797c43de7c9d 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -16,10 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-// TODO(aaro): reorder traits properly -// TODO(aaro): document functions -// TODO(aaro): rename this file to sync_service.rs? - use futures::{channel::oneshot, Stream}; use libp2p::PeerId; @@ -162,8 +158,8 @@ impl Link for ChainSyncInterfaceHandle { } impl SyncEventStream for ChainSyncInterfaceHandle { + /// Get syncing event stream. fn event_stream(&self, name: &'static str) -> Pin + Send>> { - println!("sync: register {name}"); let (tx, rx) = tracing_unbounded(name); let _ = self.tx.unbounded_send(ToServiceCommand::EventStream(tx)); Box::pin(rx) @@ -180,7 +176,6 @@ impl NetworkBlock> for ChainSyncInterfaceHandle } } -// TODO(aaro): is this needed at all? #[async_trait::async_trait] impl ChainSyncService for ChainSyncInterfaceHandle { async fn num_active_peers(&self) -> Result { diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index f092feb1a01c1..3888a705cd161 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -50,7 +50,7 @@ use sc_consensus::{ }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - ChainSyncInterface, ChainSyncService, Multiaddr, NetworkService, NetworkWorker, + ChainSyncService, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ config::{ From a920a7e0558e3f47076723decacb23d81f5a3f1e Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sun, 27 Nov 2022 09:38:44 +0200 Subject: [PATCH 13/30] Code refactoring --- client/beefy/src/tests.rs | 4 -- client/beefy/src/worker.rs | 3 +- .../src/communication/tests.rs | 11 +----- client/network-gossip/src/bridge.rs | 2 +- client/network-gossip/src/state_machine.rs | 2 +- client/network/src/config.rs | 6 +-- client/network/src/service.rs | 21 +++++----- client/network/src/service/tests/mod.rs | 11 ++---- client/network/sync/src/engine.rs | 6 +-- client/network/sync/src/lib.rs | 2 +- client/network/sync/src/service/chain_sync.rs | 22 +++++------ client/network/test/src/lib.rs | 39 
+++++++++---------- client/service/src/builder.rs | 18 ++++----- 13 files changed, 63 insertions(+), 84 deletions(-) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 4181011b30c8c..790725a07d637 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -139,10 +139,6 @@ impl BeefyTestNet { }); } - pub(crate) fn block_until_sync_connected(&mut self) { - todo!(); - } - pub(crate) fn generate_blocks_and_sync( &mut self, count: usize, diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 48d176daf2d46..68af4b8566e99 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -984,8 +984,7 @@ pub(crate) mod tests { use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; use sc_client_api::{Backend as BackendT, HeaderBackend}; - use sc_network::{ChainSyncInterface, NetworkService}; - use sc_network_common::sync::SyncEventStream; + use sc_network::NetworkService; use sc_network_test::TestNetFactory; use sp_api::HeaderT; use sp_blockchain::Backend as BlockchainBackendT; diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 5cf0ca705b1fb..119201e5fe50c 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -191,14 +191,8 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } -pub(crate) enum SyncEvent { - EventStream(TracingUnboundedSender), -} - #[derive(Clone)] -pub(crate) struct TestSync { - sender: TracingUnboundedSender, -} +pub(crate) struct TestSync; impl SyncEventStream for TestSync { fn event_stream( @@ -278,8 +272,7 @@ fn voter_set_state() -> SharedVoterSetState { pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; - let (stx, 
srx) = tracing_unbounded("sync"); - let sync = Arc::new(TestSync { sender: stx }); + let sync = Arc::new(TestSync {}); #[derive(Clone)] struct Exit; diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 53393999afc0a..8b0325f45054d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -463,7 +463,7 @@ mod tests { unimplemented!(); } - fn set_notification_handshake(&self, _protocol: ProtocolName, handshake: Vec) { + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { unimplemented!(); } } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 817e4326a49c4..bf567e12bdff7 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -676,7 +676,7 @@ mod tests { unimplemented!(); } - fn set_notification_handshake(&self, _protocol: ProtocolName, handshake: Vec) { + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { unimplemented!(); } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 3d4103551f8f3..cf4583f5e97aa 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -33,7 +33,6 @@ pub use sc_network_common::{ pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ChainSyncInterface; use core::{fmt, iter}; use libp2p::{ identity::{ed25519, Keypair}, @@ -81,10 +80,9 @@ where /// name on the wire. pub fork_id: Option, - // /// Syncing engine. - // pub engine: SyncingEngine, + // TODO(aaro): remove this /// Interface that can be used to delegate syncing-related function calls to `ChainSync` - pub chain_sync_service: Box>, + pub sync_service: Arc>, /// Registry for recording prometheus metrics to. 
pub metrics_registry: Option, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index fea16ab2e829f..1d6ec24503cf1 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -35,7 +35,7 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, protocol::{self, NotificationsSink, NotifsHandlerError, Protocol, Ready}, - transport, ChainSyncInterface, ReputationChange, + transport, ReputationChange, }; use futures::{channel::oneshot, prelude::*}; @@ -117,8 +117,9 @@ pub struct NetworkService { peerset: PeersetHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender, + // TODO(aaro): remove this /// Interface that can be used to delegate calls to `ChainSync` - chain_sync_service: Box>, + sync_service: Arc>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, @@ -433,7 +434,7 @@ where local_peer_id, local_identity, to_worker, - chain_sync_service: params.chain_sync_service, + sync_service: params.sync_service, peers_notifications_sinks: peers_notifications_sinks.clone(), notifications_sizes_metric: metrics .as_ref() @@ -691,11 +692,11 @@ impl NetworkService { impl sp_consensus::SyncOracle for NetworkService { fn is_major_syncing(&self) -> bool { - self.chain_sync_service.is_major_syncing() + self.sync_service.is_major_syncing() } fn is_offline(&self) -> bool { - self.chain_sync_service.is_offline() + self.sync_service.is_offline() } } @@ -705,11 +706,11 @@ impl sc_consensus::JustificationSyncLink for NetworkSe /// On success, the justification will be passed to the import queue that was part at /// initialization as part of the configuration. 
fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self.chain_sync_service.request_justification(hash, number); + let _ = self.sync_service.request_justification(hash, number); } fn clear_justification_requests(&self) { - let _ = self.chain_sync_service.clear_justification_requests(); + let _ = self.sync_service.clear_justification_requests(); } } @@ -773,7 +774,7 @@ where /// a stale fork missing. /// Passing empty `peers` set effectively removes the sync request. fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - self.chain_sync_service.set_sync_fork_request(peers, hash, number); + self.sync_service.set_sync_fork_request(peers, hash, number); } } @@ -1088,11 +1089,11 @@ where H: ExHashT, { fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.chain_sync_service.announce_block(hash, data); + let _ = self.sync_service.announce_block(hash, data); } fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self.chain_sync_service.new_best_block_imported(hash, number); + let _ = self.sync_service.new_best_block_imported(hash, number); } } diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 77b78f52ce33f..54d624dc9202b 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -20,16 +20,12 @@ use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::Multiaddr; -use sc_client_api::{BlockBackend, HeaderBackend}; use sc_consensus::{ImportQueue, Link}; use sc_network_common::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, - TransportConfig, - }, + config::{NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, protocol::{event::Event, role::Roles}, service::NetworkEventStream, - sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT}, + sync::ChainSync as 
ChainSyncT, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -37,9 +33,8 @@ use sc_network_sync::{ engine::SyncingEngine, service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, - ChainSync, }; -use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; +use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block as TestBlock, Hash as TestHash}, diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index c6ec78a79221f..98f906935c974 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -18,7 +18,7 @@ use crate::{ service::{self, chain_sync::ToServiceCommand}, - ChainSync, ChainSyncInterfaceHandle, ClientError, + ChainSync, ClientError, SyncingService, }; use futures::{Stream, StreamExt}; @@ -266,7 +266,7 @@ where default_peers_set_no_slot_peers: HashSet, default_peers_set_num_full: usize, default_peers_set_num_light: usize, - ) -> Result<(Self, ChainSyncInterfaceHandle, NonDefaultSetConfig), ClientError> { + ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let (chain_sync, block_announce_config) = ChainSync::new( mode, client.clone(), @@ -327,7 +327,7 @@ where None }, }, - ChainSyncInterfaceHandle::new(tx, num_connected, is_major_syncing), + SyncingService::new(tx, num_connected, is_major_syncing), block_announce_config, )) } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 2145ccf33bc8f..80ad00ddc712e 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -44,7 +44,6 @@ pub mod warp_request_handler; use crate::{ blocks::BlockCollection, schema::v1::{StateRequest, StateResponse}, - service::chain_sync::ChainSyncInterfaceHandle, state::StateSync, warp::{WarpProofImportResult, WarpSync}, }; @@ -79,6 +78,7 @@ use 
sc_network_common::{ SyncState, SyncStatus, }, }; +pub use service::chain_sync::SyncingService; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index c797c43de7c9d..7539c9ce41d07 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -66,7 +66,7 @@ pub enum ToServiceCommand { /// Handle for communicating with `ChainSync` asynchronously #[derive(Clone)] -pub struct ChainSyncInterfaceHandle { +pub struct SyncingService { tx: TracingUnboundedSender>, /// Number of peers we're connected to. num_connected: Arc, @@ -74,7 +74,7 @@ pub struct ChainSyncInterfaceHandle { is_major_syncing: Arc, } -impl ChainSyncInterfaceHandle { +impl SyncingService { /// Create new handle pub fn new( tx: TracingUnboundedSender>, @@ -85,9 +85,7 @@ impl ChainSyncInterfaceHandle { } } -impl NetworkSyncForkRequest> - for ChainSyncInterfaceHandle -{ +impl NetworkSyncForkRequest> for SyncingService { /// Configure an explicit fork sync request. /// /// Note that this function should not be used for recent blocks. @@ -103,7 +101,7 @@ impl NetworkSyncForkRequest> } } -impl JustificationSyncLink for ChainSyncInterfaceHandle { +impl JustificationSyncLink for SyncingService { /// Request a justification for the given block from the network. /// /// On success, the justification will be passed to the import queue that was part at @@ -118,7 +116,7 @@ impl JustificationSyncLink for ChainSyncInterfaceHandle { } #[async_trait::async_trait] -impl SyncStatusProvider for ChainSyncInterfaceHandle { +impl SyncStatusProvider for SyncingService { /// Get high-level view of the syncing status. 
async fn status(&self) -> Result, ()> { let (rtx, rrx) = oneshot::channel(); @@ -128,7 +126,7 @@ impl SyncStatusProvider for ChainSyncInterfaceHandle { } } -impl Link for ChainSyncInterfaceHandle { +impl Link for SyncingService { fn blocks_processed( &mut self, imported: usize, @@ -157,7 +155,7 @@ impl Link for ChainSyncInterfaceHandle { } } -impl SyncEventStream for ChainSyncInterfaceHandle { +impl SyncEventStream for SyncingService { /// Get syncing event stream. fn event_stream(&self, name: &'static str) -> Pin + Send>> { let (tx, rx) = tracing_unbounded(name); @@ -166,7 +164,7 @@ impl SyncEventStream for ChainSyncInterfaceHandle { } } -impl NetworkBlock> for ChainSyncInterfaceHandle { +impl NetworkBlock> for SyncingService { fn announce_block(&self, hash: B::Hash, data: Option>) { let _ = self.tx.unbounded_send(ToServiceCommand::AnnounceBlock(hash, data)); } @@ -177,7 +175,7 @@ impl NetworkBlock> for ChainSyncInterfaceHandle } #[async_trait::async_trait] -impl ChainSyncService for ChainSyncInterfaceHandle { +impl ChainSyncService for SyncingService { async fn num_active_peers(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx)); @@ -232,7 +230,7 @@ impl ChainSyncService for ChainSyncInterfaceHandle { } } -impl sp_consensus::SyncOracle for ChainSyncInterfaceHandle { +impl sp_consensus::SyncOracle for SyncingService { fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 3888a705cd161..df6df595b8dca 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -63,7 +63,7 @@ use sc_network_common::{ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, - service::{chain_sync::ChainSyncInterfaceHandle, network::NetworkServiceProvider}, + 
service::{chain_sync::SyncingService, network::NetworkServiceProvider}, state_request_handler::StateRequestHandler, warp_request_handler, }; @@ -237,7 +237,7 @@ pub struct Peer { select_chain: Option>, backend: Option>, network: NetworkWorker::Hash, PeersFullClient>, - chain_sync_service: Arc>, + sync_service: Arc>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, listen_addr: Multiaddr, @@ -272,12 +272,12 @@ where /// Returns the number of downloaded blocks. pub async fn num_downloaded_blocks(&self) -> usize { - self.chain_sync_service.num_downloaded_blocks().await.unwrap() + self.sync_service.num_downloaded_blocks().await.unwrap() } /// Returns true if we have no peer. pub fn is_offline(&self) -> bool { - self.chain_sync_service.is_offline() + self.sync_service.is_offline() } /// Request a justification for the given block. @@ -399,7 +399,7 @@ where } if inform_sync_about_new_best_block { - self.chain_sync_service.new_best_block_imported( + self.sync_service.new_best_block_imported( at, *full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number(), ); @@ -511,8 +511,8 @@ where self.network.service() } - pub fn sync_service(&self) -> &Arc> { - &self.chain_sync_service + pub fn sync_service(&self) -> &Arc> { + &self.sync_service } /// Get a reference to the network worker. 
@@ -880,7 +880,7 @@ where let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (engine, chain_sync_service, block_announce_config) = + let (engine, sync_service, block_announce_config) = sc_network_sync::engine::SyncingEngine::new( Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), client.clone(), @@ -946,6 +946,8 @@ where }, ) .unwrap(); + let sync_service_import_queue = Box::new(sync_service.clone()); + let sync_service = Arc::new(sync_service.clone()); let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, @@ -954,9 +956,7 @@ where chain: client.clone(), protocol_id, fork_id, - // engine, - // TODO(aaro): fix arcs - chain_sync_service: Box::new(chain_sync_service.clone()), + sync_service: sync_service.clone(), metrics_registry: None, block_announce_config, request_response_protocol_configs: [ @@ -975,9 +975,8 @@ where async_std::task::spawn(async move { chain_sync_network_provider.run(service).await; }); - let service = chain_sync_service.clone(); async_std::task::spawn(async move { - import_queue.run(Box::new(service)).await; + import_queue.run(sync_service_import_queue).await; }); let service = network.service().clone(); async_std::task::spawn(async move { @@ -1004,7 +1003,7 @@ where block_import, verifier, network, - chain_sync_service: Arc::new(chain_sync_service), + sync_service, listen_addr, }); }); @@ -1034,12 +1033,12 @@ where let peers = self.peers_mut(); for peer in peers { - if peer.chain_sync_service.is_major_syncing() || - peer.chain_sync_service.num_queued_blocks().await.unwrap() != 0 + if peer.sync_service.is_major_syncing() || + peer.sync_service.num_queued_blocks().await.unwrap() != 0 { return false } - if peer.chain_sync_service.num_sync_requests().await.unwrap() != 0 { + if peer.sync_service.num_sync_requests().await.unwrap() != 0 { return false } match (highest, peer.client.info().best_hash) { 
@@ -1055,10 +1054,10 @@ where async fn is_idle(&mut self) -> bool { let peers = self.peers_mut(); for peer in peers { - if peer.chain_sync_service.num_queued_blocks().await.unwrap() != 0 { + if peer.sync_service.num_queued_blocks().await.unwrap() != 0 { return false } - if peer.chain_sync_service.num_sync_requests().await.unwrap() != 0 { + if peer.sync_service.num_sync_requests().await.unwrap() != 0 { return false } } @@ -1137,7 +1136,7 @@ where peer.finality_notification_stream.as_mut().poll_next(cx) { use sc_network::ChainSyncService; - peer.chain_sync_service + peer.sync_service .on_block_finalized(notification.hash, notification.header); } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 9fe48e45e7a29..c04c2ee2e217f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -852,7 +852,7 @@ where // TODO(aaro): expose `config.network` through common crate let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( + let (engine, sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), client.clone(), config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), @@ -921,6 +921,9 @@ where }, )?; + let sync_service_import_queue = sync_service.clone(); + let sync_service = Arc::new(sync_service); + request_response_protocol_configs.push(config.network.ipfs_server.then(|| { let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run()); @@ -939,8 +942,7 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - // engine, - chain_sync_service: Box::new(chain_sync_service.clone()), + sync_service: sync_service.clone(), metrics_registry: config.prometheus_config.as_ref().map(|config| 
config.registry.clone()), block_announce_config, request_response_protocol_configs: request_response_protocol_configs @@ -973,12 +975,10 @@ where let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let sync_service = chain_sync_service.clone(); let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( network.clone(), - // TODO(aaro): wrap chainsyncinterface into an arc - Arc::new(chain_sync_service.clone()), + sync_service.clone(), Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), config.prometheus_config.as_ref().map(|config| &config.registry), )?; @@ -989,7 +989,7 @@ where Some("networking"), chain_sync_network_provider.run(network.clone()), ); - spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service))); + spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue))); let event_stream = network.event_stream("syncing"); spawn_handle.spawn("syncing", None, engine.run(event_stream)); @@ -1001,7 +1001,7 @@ where network_mut, client, system_rpc_rx, - Arc::new(chain_sync_service.clone()), + sync_service.clone(), has_bootnodes, config.announce_block, ); @@ -1047,7 +1047,7 @@ where system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx), - Arc::new(chain_sync_service), + sync_service.clone() )) } From 5dd14e312fa44244e17321b5649387c797ae0246 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 28 Nov 2022 10:34:23 +0200 Subject: [PATCH 14/30] Use `SyncingService` for BEEFY --- Cargo.lock | 1 + client/beefy/Cargo.toml | 1 + client/beefy/src/lib.rs | 19 ++++++++++--------- client/beefy/src/tests.rs | 2 +- client/beefy/src/worker.rs | 32 ++++++++++++-------------------- client/network/test/src/lib.rs | 3 +-- client/service/src/builder.rs | 2 +- 7 files changed, 27 insertions(+), 33 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index 8b531b690024b..5f879de00e147 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -461,6 +461,7 @@ dependencies = [ "sc-network", "sc-network-common", "sc-network-gossip", + "sc-network-sync", "sc-network-test", "sc-utils", "serde", diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 999c5a298fe57..0fe0454e30225 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -29,6 +29,7 @@ sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } +sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-application-crypto = { version = "7.0.0", path = "../../primitives/application-crypto" } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 195167112d921..e679d816f2408 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -165,11 +165,11 @@ where } /// BEEFY gadget network parameters. -pub struct BeefyNetworkParams { +pub struct BeefyNetworkParams { /// Network implementing gossip, requests and sync-oracle. pub network: Arc, - /// Syncing service implementing event stream for peers. - pub sync: Arc, + /// Syncing service implementing a sync oracle and an event stream for peers. + pub sync: Arc, /// Chain specific BEEFY gossip protocol name. See /// [`communication::beefy_protocol_name::gossip_protocol_name`]. pub gossip_protocol_name: ProtocolName, @@ -181,7 +181,7 @@ pub struct BeefyNetworkParams { } /// BEEFY gadget initialization parameters. 
-pub struct BeefyParams { +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend @@ -193,7 +193,7 @@ pub struct BeefyParams { /// Local key store pub key_store: Option, /// BEEFY voter network params - pub network_params: BeefyNetworkParams, + pub network_params: BeefyNetworkParams, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry @@ -207,15 +207,17 @@ pub struct BeefyParams { /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. -pub async fn start_beefy_gadget(beefy_params: BeefyParams) -where +pub async fn start_beefy_gadget( + beefy_params: BeefyParams, +) where B: Block, BE: Backend, C: Client + BlockBackend, P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi>, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + Send + Sync + 'static, + S: SyncEventStream + SyncOracle + 'static, { let BeefyParams { client, @@ -292,7 +294,6 @@ where let worker_params = worker::WorkerParams { backend, payload_provider, - network, sync, key_store: key_store.into(), known_peers, diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 790725a07d637..e37647a821c0c 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -365,7 +365,7 @@ where prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, }; - let task = crate::start_beefy_gadget::<_, _, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} assert_send(&task); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 68af4b8566e99..0aa4314f0a31f 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -29,10 +29,7 @@ use log::{debug, error, info, log_enabled, trace, warn}; use parking_lot::Mutex; use sc_client_api::{Backend, 
FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_network_common::{ - service::{NetworkEventStream, NetworkRequest}, - sync::{SyncEvent, SyncEventStream}, -}; +use sc_network_common::sync::{SyncEvent, SyncEventStream}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -248,11 +245,10 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub backend: Arc, pub payload_provider: P, - pub network: N, - pub sync: Arc, + pub sync: Arc, pub key_store: BeefyKeystore, pub known_peers: Arc>>, pub gossip_engine: GossipEngine, @@ -298,12 +294,11 @@ impl PersistedState { } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities backend: Arc, payload_provider: P, - network: N, - sync: Arc, + sync: Arc, key_store: BeefyKeystore, // communication @@ -327,14 +322,14 @@ pub(crate) struct BeefyWorker { persisted_state: PersistedState, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi>, - N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, + S: SyncEventStream + SyncOracle, { /// Return a new BEEFY worker instance. /// @@ -342,12 +337,11 @@ where /// BEEFY pallet has been deployed on-chain. /// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. - pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { backend, payload_provider, key_store, - network, sync, gossip_engine, gossip_validator, @@ -361,7 +355,6 @@ where BeefyWorker { backend, payload_provider, - network, sync, known_peers, key_store, @@ -815,7 +808,7 @@ where loop { // Don't bother voting or requesting justifications during major sync. 
- if !self.network.is_major_syncing() { + if !self.sync.is_major_syncing() { // If the current target is a mandatory block, // make sure there's also an on-demand justification request out for it. if let Some(block) = self.voting_oracle().mandatory_pending() { @@ -984,7 +977,7 @@ pub(crate) mod tests { use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; use sc_client_api::{Backend as BackendT, HeaderBackend}; - use sc_network::NetworkService; + use sc_network_sync::SyncingService; use sc_network_test::TestNetFactory; use sp_api::HeaderT; use sp_blockchain::Backend as BlockchainBackendT; @@ -1028,7 +1021,7 @@ pub(crate) mod tests { Backend, MmrRootProvider, TestApi, - Arc>, + Arc>, > { let keystore = create_beefy_keystore(*key); @@ -1087,8 +1080,7 @@ pub(crate) mod tests { gossip_engine, gossip_validator, metrics: None, - network, - sync, + sync: Arc::new(sync), on_demand_justifications, persisted_state, }; diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index df6df595b8dca..bbc6db93c21f6 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -1136,8 +1136,7 @@ where peer.finality_notification_stream.as_mut().poll_next(cx) { use sc_network::ChainSyncService; - peer.sync_service - .on_block_finalized(notification.hash, notification.header); + peer.sync_service.on_block_finalized(notification.hash, notification.header); } } }); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index c04c2ee2e217f..4de71a3d31c22 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1047,7 +1047,7 @@ where system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx), - sync_service.clone() + sync_service.clone(), )) } From 83509c988ee97af520271e689c33bd781af3a528 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 28 Nov 2022 21:35:42 +0200 Subject: [PATCH 15/30] Use `SyncingService` for 
GRANDPA --- client/beefy/src/lib.rs | 4 +- .../finality-grandpa/src/communication/mod.rs | 43 +++++++++++++++---- .../src/communication/tests.rs | 18 +++++++- client/finality-grandpa/src/environment.rs | 31 +++++++++---- client/finality-grandpa/src/lib.rs | 33 ++++++++------ client/finality-grandpa/src/observer.rs | 19 ++++---- client/finality-grandpa/src/tests.rs | 10 +++-- client/network-gossip/src/bridge.rs | 22 ++++++++-- client/network-gossip/src/lib.rs | 6 +++ client/network/src/service/tests/mod.rs | 2 +- 10 files changed, 137 insertions(+), 51 deletions(-) diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index e679d816f2408..9e1a6777362d9 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -43,7 +43,7 @@ use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotificatio use sc_consensus::BlockImport; use sc_network::ProtocolName; use sc_network_common::{service::NetworkRequest, sync::SyncEventStream}; -use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; +use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi}; use sp_blockchain::{ Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult, @@ -217,7 +217,7 @@ pub async fn start_beefy_gadget( R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi>, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, - S: SyncEventStream + SyncOracle + 'static, + S: GossipSyncing + SyncOracle + 'static, { let BeefyParams { client, diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 2668952320c8e..a3d83849d8da2 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -166,8 +166,7 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// A handle to the network. 
/// -/// Something that provides both the capabilities needed for the `gossip_network::Network` trait as -/// well as the ability to set a fork sync request for a particular block. +/// Something that provides the capabilities needed for the `gossip_network::Network` trait. pub trait Network: NetworkSyncForkRequest> + NetworkBlock> @@ -190,6 +189,31 @@ where { } +/// A handle to syncing-related services. +/// +/// Something that provides the ability to set a fork sync request for a particular block. +pub trait Syncing: + NetworkSyncForkRequest> + + NetworkBlock> + + SyncEventStream + + Clone + + Send + + 'static +{ +} + +impl Syncing for T +where + Block: BlockT, + T: NetworkSyncForkRequest> + + NetworkBlock> + + SyncEventStream + + Clone + + Send + + 'static, +{ +} + /// Create a unique topic for a round and set-id combo. pub(crate) fn round_topic(round: RoundNumber, set_id: SetIdNumber) -> B::Hash { <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) @@ -201,8 +225,9 @@ pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { } /// Bridge between the underlying network service, gossiping consensus messages and Grandpa -pub(crate) struct NetworkBridge> { +pub(crate) struct NetworkBridge, S: Syncing> { service: N, + sync: S, gossip_engine: Arc>>, validator: Arc>, @@ -228,16 +253,16 @@ pub(crate) struct NetworkBridge> { telemetry: Option, } -impl> Unpin for NetworkBridge {} +impl, S: Syncing> Unpin for NetworkBridge {} -impl> NetworkBridge { +impl, S: Syncing> NetworkBridge { /// Create a new NetworkBridge to the given NetworkService. Returns the service /// handle. /// On creation it will register previous rounds' votes with the gossip /// service taken from the VoterSetState. 
pub(crate) fn new( service: N, - sync: Arc, + sync: S, config: crate::Config, set_state: crate::environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, @@ -294,6 +319,7 @@ impl> NetworkBridge { NetworkBridge { service, + sync, gossip_engine, validator, neighbor_sender: neighbor_packet_sender, @@ -477,7 +503,7 @@ impl> NetworkBridge { } } -impl> Future for NetworkBridge { +impl, S: Syncing> Future for NetworkBridge { type Output = Result<(), Error>; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { @@ -654,10 +680,11 @@ fn incoming_global( }) } -impl> Clone for NetworkBridge { +impl, S: Syncing> Clone for NetworkBridge { fn clone(&self) -> Self { NetworkBridge { service: self.service.clone(), + sync: self.sync.clone(), gossip_engine: self.gossip_engine.clone(), validator: Arc::clone(&self.validator), neighbor_sender: self.neighbor_sender.clone(), diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 119201e5fe50c..3544725de4397 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -203,8 +203,22 @@ impl SyncEventStream for TestSync { } } +impl NetworkBlock> for TestSync { + fn announce_block(&self, hash: Hash, _data: Option>) { + todo!(); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); + } +} + +impl NetworkSyncForkRequest> for TestSync { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + pub(crate) struct Tester { - pub(crate) net_handle: super::NetworkBridge, + pub(crate) net_handle: super::NetworkBridge, gossip_validator: Arc>, pub(crate) events: TracingUnboundedReceiver, } @@ -272,7 +286,7 @@ fn voter_set_state() -> SharedVoterSetState { pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; - let sync = Arc::new(TestSync {}); 
+ let sync = TestSync {}; #[derive(Clone)] struct Exit; diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f235c3a86c04e..d55b00d5ad839 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -53,7 +53,7 @@ use sp_runtime::{ use crate::{ authorities::{AuthoritySet, SharedAuthoritySet}, - communication::Network as NetworkT, + communication::{Network as NetworkT, Syncing as SyncingT}, justification::GrandpaJustification, local_authority_id, notification::GrandpaJustificationSender, @@ -426,13 +426,21 @@ impl Metrics { } /// The environment we run GRANDPA in. -pub(crate) struct Environment, SC, VR> { +pub(crate) struct Environment< + Backend, + Block: BlockT, + C, + N: NetworkT, + S: SyncingT, + SC, + VR, +> { pub(crate) client: Arc, pub(crate) select_chain: SC, pub(crate) voters: Arc>, pub(crate) config: Config, pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) network: crate::communication::NetworkBridge, + pub(crate) network: crate::communication::NetworkBridge, pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, pub(crate) voting_rule: VR, @@ -442,7 +450,9 @@ pub(crate) struct Environment, SC, pub(crate) _phantom: PhantomData, } -impl, SC, VR> Environment { +impl, S: SyncingT, SC, VR> + Environment +{ /// Updates the voter set state using the given closure. The write lock is /// held during evaluation of the closure and the environment's voter set /// state is set to its result if successful. @@ -472,13 +482,14 @@ impl, SC, VR> Environment Environment +impl Environment where Block: BlockT, BE: BackendT, C: ClientForGrandpa, C::Api: GrandpaApi, N: NetworkT, + S: SyncingT, SC: SelectChainT, { /// Report the given equivocation to the GRANDPA runtime module. 
This method @@ -572,13 +583,14 @@ where } } -impl finality_grandpa::Chain> - for Environment +impl finality_grandpa::Chain> + for Environment where Block: BlockT, BE: BackendT, C: ClientForGrandpa, N: NetworkT, + S: SyncingT, SC: SelectChainT, VR: VotingRuleT, NumberFor: BlockNumberOps, @@ -625,14 +637,15 @@ where Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl voter::Environment> - for Environment +impl voter::Environment> + for Environment where Block: BlockT, B: BackendT, C: ClientForGrandpa + 'static, C::Api: GrandpaApi, N: NetworkT, + S: SyncingT, SC: SelectChainT + 'static, VR: VotingRuleT + Clone + 'static, NumberFor: BlockNumberOps, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 0e29354290aed..300b71808630e 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -138,7 +138,7 @@ pub use voting_rule::{ }; use aux_schema::PersistentData; -use communication::{Network as NetworkT, NetworkBridge}; +use communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT}; use environment::{Environment, VoterSetState}; use sp_finality_grandpa::{AuthorityList, AuthoritySignature, SetId}; use until_imported::UntilGlobalMessageBlocksImported; @@ -347,10 +347,11 @@ pub(crate) trait BlockSyncRequester { ); } -impl BlockSyncRequester for NetworkBridge +impl BlockSyncRequester for NetworkBridge where Block: BlockT, Network: NetworkT, + Syncing: SyncingT, { fn set_sync_fork_request( &self, @@ -614,11 +615,11 @@ where )) } -fn global_communication( +fn global_communication( set_id: SetId, voters: &Arc>, client: Arc, - network: &NetworkBridge, + network: &NetworkBridge, keystore: Option<&SyncCryptoStorePtr>, metrics: Option, ) -> ( @@ -637,6 +638,7 @@ where BE: Backend + 'static, C: ClientForGrandpa + 'static, N: NetworkT, + S: SyncingT, NumberFor: BlockNumberOps, { let is_voter = local_authority_id(voters, keystore).is_some(); @@ -662,7 +664,7 @@ where } /// 
Parameters used to run Grandpa. -pub struct GrandpaParams { +pub struct GrandpaParams { /// Configuration for the GRANDPA service. pub config: Config, /// A link to the block import worker. @@ -674,7 +676,7 @@ pub struct GrandpaParams { /// to the configuration of the networking. See [`grandpa_peers_set_config`]. pub network: N, /// Event stream for syncing-related events. - pub sync: Arc, + pub sync: S, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. @@ -709,13 +711,14 @@ pub fn grandpa_peers_set_config( /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. -pub fn run_grandpa_voter( - grandpa_params: GrandpaParams, +pub fn run_grandpa_voter( + grandpa_params: GrandpaParams, ) -> sp_blockchain::Result + Send> where Block::Hash: Ord, BE: Backend + 'static, N: NetworkT + Sync + 'static, + S: SyncingT + Sync + 'static, SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, @@ -836,26 +839,27 @@ impl Metrics { /// Future that powers the voter. #[must_use] -struct VoterWork, SC, VR> { +struct VoterWork, S: SyncingT, SC, VR> { voter: Pin< Box>>> + Send>, >, shared_voter_state: SharedVoterState, - env: Arc>, + env: Arc>, voter_commands_rx: TracingUnboundedReceiver>>, - network: NetworkBridge, + network: NetworkBridge, telemetry: Option, /// Prometheus metrics. 
metrics: Option, } -impl VoterWork +impl VoterWork where Block: BlockT, B: Backend + 'static, C: ClientForGrandpa + 'static, C::Api: GrandpaApi, N: NetworkT + Sync, + S: SyncingT + Sync, NumberFor: BlockNumberOps, SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, @@ -863,7 +867,7 @@ where fn new( client: Arc, config: Config, - network: NetworkBridge, + network: NetworkBridge, select_chain: SC, voting_rule: VR, persistent_data: PersistentData, @@ -1066,11 +1070,12 @@ where } } -impl Future for VoterWork +impl Future for VoterWork where Block: BlockT, B: Backend + 'static, N: NetworkT + Sync, + S: SyncingT + Sync, NumberFor: BlockNumberOps, SC: SelectChain + 'static, C: ClientForGrandpa + 'static, diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index a8ac07c5feb11..f527438e41347 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -40,7 +40,7 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use crate::{ authorities::SharedAuthoritySet, aux_schema::PersistentData, - communication::{Network as NetworkT, NetworkBridge}, + communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT}, environment, global_communication, notification::GrandpaJustificationSender, ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand, @@ -164,15 +164,16 @@ where /// already been instantiated with `block_import`. /// NOTE: this is currently not part of the crate's public API since we don't consider /// it stable enough to use on a live network. -pub fn run_grandpa_observer( +pub fn run_grandpa_observer( config: Config, link: LinkHalf, network: N, - sync: Arc, + sync: S, ) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, N: NetworkT, + S: SyncingT, SC: SelectChain, NumberFor: BlockNumberOps, Client: ClientForGrandpa + 'static, @@ -214,11 +215,11 @@ where /// Future that powers the observer. 
#[must_use] -struct ObserverWork> { +struct ObserverWork, S: SyncingT> { observer: Pin>>> + Send>>, client: Arc, - network: NetworkBridge, + network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, @@ -227,17 +228,18 @@ struct ObserverWork> { _phantom: PhantomData, } -impl ObserverWork +impl ObserverWork where B: BlockT, BE: Backend + 'static, Client: ClientForGrandpa + 'static, Network: NetworkT, + Syncing: SyncingT, NumberFor: BlockNumberOps, { fn new( client: Arc, - network: NetworkBridge, + network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, @@ -350,12 +352,13 @@ where } } -impl Future for ObserverWork +impl Future for ObserverWork where B: BlockT, BE: Backend + Unpin + 'static, C: ClientForGrandpa + 'static, N: NetworkT, + S: SyncingT, NumberFor: BlockNumberOps, { type Output = Result<(), Error>; diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 4326d024580e7..1c27e341f15bc 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1329,24 +1329,26 @@ fn voter_catches_up_to_latest_round_when_behind() { runtime.block_on(future::select(test, drive_to_completion)); } -type TestEnvironment = Environment< +type TestEnvironment = Environment< substrate_test_runtime_client::Backend, Block, TestClient, N, + S, LongestChain, VR, >; -fn test_environment( +fn test_environment( link: &TestLinkHalf, keystore: Option, network_service: N, - sync_service: Arc, + sync_service: S, voting_rule: VR, -) -> TestEnvironment +) -> TestEnvironment where N: NetworkT, + S: SyncingT, VR: VotingRule, { let PersistentData { ref authority_set, ref set_state, .. 
} = link.persistent_data; diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 8b0325f45054d..93919f8b73547 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -18,7 +18,7 @@ use crate::{ state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}, - Network, Validator, + Network, Syncing, Validator, }; use sc_network_common::{ @@ -80,15 +80,17 @@ impl Unpin for GossipEngine {} impl GossipEngine { /// Create a new instance. - pub fn new + Send + Clone + 'static>( + pub fn new( network: N, - sync: Arc, + sync: S, protocol: impl Into, validator: Arc>, metrics_registry: Option<&Registry>, ) -> Self where B: 'static, + N: Network + Send + Clone + 'static, + S: Syncing + Send + Clone + 'static, { let protocol = protocol.into(); let network_event_stream = network.event_stream("network-gossip"); @@ -504,6 +506,20 @@ mod tests { } } + impl NetworkBlock<::Hash, NumberFor> for TestSync { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { + unimplemented!(); + } + + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { + unimplemented!(); + } + } + struct AllowAll; impl Validator for AllowAll { fn validate( diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1c8fb8ba05ce7..79522f76b7567 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -71,6 +71,7 @@ use libp2p::{multiaddr, PeerId}; use sc_network_common::{ protocol::ProtocolName, service::{NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers}, + sync::SyncEventStream, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::iter; @@ -100,3 +101,8 @@ impl Network for T where + NetworkBlock> { } + +/// Abstraction over the syncing subsystem. 
+pub trait Syncing: SyncEventStream + NetworkBlock> {} + +impl Syncing for T where T: SyncEventStream + NetworkBlock> {} diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 54d624dc9202b..9c8e37d273040 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -287,7 +287,7 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - chain_sync_service: Box::new(chain_sync_service), + sync_service: Arc::new(chain_sync_service), metrics_registry: None, request_response_protocol_configs: [ block_request_protocol_config, From bb005b604cf08ce96e2330598df0126649b94657 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 28 Nov 2022 23:29:14 +0200 Subject: [PATCH 16/30] Remove call delegation from `NetworkService` --- Cargo.lock | 2 + bin/node-template/node/src/service.rs | 6 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/chain_spec.rs | 3 +- bin/node/cli/src/service.rs | 24 ++++-- .../finality-grandpa/src/communication/mod.rs | 19 +---- client/network-gossip/src/bridge.rs | 4 +- client/network-gossip/src/lib.rs | 12 +-- client/network/src/service.rs | 84 ------------------- client/network/test/src/lib.rs | 12 +-- client/network/test/src/sync.rs | 2 +- client/network/transactions/src/lib.rs | 48 ++++++----- client/service/src/builder.rs | 6 +- client/service/src/lib.rs | 9 +- client/service/test/Cargo.toml | 1 + client/service/test/src/lib.rs | 18 +++- 16 files changed, 93 insertions(+), 158 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f879de00e147..4af705fe8b560 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4551,6 +4551,7 @@ dependencies = [ "sc-keystore", "sc-network", "sc-network-common", + "sc-network-sync", "sc-rpc", "sc-service", "sc-service-test", @@ -8659,6 +8660,7 @@ dependencies = [ "sc-executor", "sc-network", "sc-network-common", + "sc-network-sync", "sc-service", "sc-transaction-pool-api", "sp-api", diff --git 
a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 1720524a2bdfc..e8086e6ef7f39 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -277,8 +277,8 @@ pub fn new_full(mut config: Configuration) -> Result force_authoring, backoff_authoring_blocks, keystore: keystore_container.sync_keystore(), - sync_oracle: network.clone(), - justification_sync_link: network.clone(), + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), @@ -321,7 +321,7 @@ pub fn new_full(mut config: Configuration) -> Result config: grandpa_config, link: grandpa_link, network, - // TODo(aaro): fix arc + // TODO(aaro): fix arc sync: Arc::new(sync_service), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d56764f9e2040..dc41096c792d3 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -67,6 +67,7 @@ sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transacti sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } +sc-network-sync = { version = "0.10.0-dev", path = "../../../client/network/sync" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 8d74f2bde0f44..5684ee8b3ce8b 100644 --- 
a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -476,12 +476,13 @@ pub(crate) mod tests { sp_tracing::try_init_simple(); sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } = + let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } = new_full_base(config, false, |_, _| ())?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, network, + sync, transaction_pool, )) }); diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e1f6d1b686e85..30586519abe79 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -31,6 +31,7 @@ use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::NetworkService; use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; +use sc_network_sync::SyncingService; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; @@ -302,6 +303,8 @@ pub struct NewFullBase { pub client: Arc, /// The networking service of the node. pub network: Arc::Hash>>, + /// The syncing service of the node. + pub sync: Arc>, /// The transaction pool of the node. pub transaction_pool: Arc, /// The rpc handlers of the node. 
@@ -431,8 +434,8 @@ pub fn new_full_base( select_chain, env: proposer, block_import, - sync_oracle: network.clone(), - justification_sync_link: network.clone(), + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), create_inherent_data_providers: move |parent, ()| { let client_clone = client_clone.clone(); async move { @@ -533,7 +536,7 @@ pub fn new_full_base( config, link: grandpa_link, network: network.clone(), - sync: Arc::new(sync_service), + sync: Arc::new(sync_service.clone()), telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, @@ -550,7 +553,14 @@ pub fn new_full_base( } network_starter.start_network(); - Ok(NewFullBase { task_manager, client, network, transaction_pool, rpc_handlers }) + Ok(NewFullBase { + task_manager, + client, + network, + sync: sync_service, + transaction_pool, + rpc_handlers, + }) } /// Builds a new service for a full client. @@ -623,7 +633,7 @@ mod tests { chain_spec, |config| { let mut setup_handles = None; - let NewFullBase { task_manager, client, network, transaction_pool, .. } = + let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } = new_full_base( config, false, @@ -637,6 +647,7 @@ mod tests { task_manager, client, network, + sync, transaction_pool, ); Ok((node, setup_handles.unwrap())) @@ -802,12 +813,13 @@ mod tests { sc_service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } = + let NewFullBase { task_manager, client, network, sync, transaction_pool, .. 
} = new_full_base(config, false, |_, _| ())?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, network, + sync, transaction_pool, )) }, diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index a3d83849d8da2..237eb7766be7e 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -167,25 +167,12 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// A handle to the network. /// /// Something that provides the capabilities needed for the `gossip_network::Network` trait. -pub trait Network: - NetworkSyncForkRequest> - + NetworkBlock> - + GossipNetwork - + Clone - + Send - + 'static -{ -} +pub trait Network: GossipNetwork + Clone + Send + 'static {} impl Network for T where Block: BlockT, - T: NetworkSyncForkRequest> - + NetworkBlock> - + GossipNetwork - + Clone - + Send - + 'static, + T: GossipNetwork + Clone + Send + 'static, { } @@ -499,7 +486,7 @@ impl, S: Syncing> NetworkBridge { hash: B::Hash, number: NumberFor, ) { - self.service.set_sync_fork_request(peers, hash, number) + self.sync.set_sync_fork_request(peers, hash, number) } } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 93919f8b73547..a2fa98e533f57 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -47,6 +47,7 @@ use std::{ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, + sync: Box>, periodic_maintenance_interval: futures_timer::Delay, protocol: ProtocolName, @@ -99,6 +100,7 @@ impl GossipEngine { GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), network: Box::new(network), + sync: Box::new(sync), periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), protocol, @@ -172,7 +174,7 @@ impl GossipEngine { /// Note: this method isn't strictly related to gossiping and should 
eventually be moved /// somewhere else. pub fn announce(&self, block: B::Hash, associated_data: Option>) { - self.network.announce_block(block, associated_data); + self.sync.announce_block(block, associated_data); } } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 79522f76b7567..667ad39e57b1e 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -81,9 +81,7 @@ mod state_machine; mod validator; /// Abstraction over a network. -pub trait Network: - NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> -{ +pub trait Network: NetworkPeers + NetworkEventStream + NetworkNotification { fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); @@ -94,13 +92,7 @@ pub trait Network: } } -impl Network for T where - T: NetworkPeers - + NetworkEventStream - + NetworkNotification - + NetworkBlock> -{ -} +impl Network for T where T: NetworkPeers + NetworkEventStream + NetworkNotification {} /// Abstraction over the syncing subsystem. pub trait Syncing: SyncEventStream + NetworkBlock> {} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1d6ec24503cf1..4c9cd1704580e 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -480,36 +480,6 @@ where self.network_service.behaviour().user_protocol().num_connected_peers() } - // /// Returns the number of peers we're connected to and that are being queried. - // pub fn num_active_peers(&self) -> usize { - // self.network_service.behaviour().user_protocol().num_active_peers() - // } - - // /// Target sync block number. - // pub fn best_seen_block(&self) -> Option> { - // self.network_service.behaviour().user_protocol().best_seen_block() - // } - - // /// Number of peers participating in syncing. 
- // pub fn num_sync_peers(&self) -> u32 { - // self.network_service.behaviour().user_protocol().num_sync_peers() - // } - - // /// Number of blocks in the import queue. - // pub fn num_queued_blocks(&self) -> u32 { - // self.network_service.behaviour().user_protocol().num_queued_blocks() - // } - - // /// Returns the number of downloaded blocks. - // pub fn num_downloaded_blocks(&self) -> usize { - // self.network_service.behaviour().user_protocol().num_downloaded_blocks() - // } - - // /// Number of active sync requests. - // pub fn num_sync_requests(&self) -> usize { - // self.network_service.behaviour().user_protocol().num_sync_requests() - // } - /// Adds an address for a node. pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { self.network_service.behaviour_mut().add_known_address(peer_id, addr); @@ -690,30 +660,6 @@ impl NetworkService { } } -impl sp_consensus::SyncOracle for NetworkService { - fn is_major_syncing(&self) -> bool { - self.sync_service.is_major_syncing() - } - - fn is_offline(&self) -> bool { - self.sync_service.is_offline() - } -} - -impl sc_consensus::JustificationSyncLink for NetworkService { - /// Request a justification for the given block from the network. - /// - /// On success, the justification will be passed to the import queue that was part at - /// initialization as part of the configuration. - fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self.sync_service.request_justification(hash, number); - } - - fn clear_justification_requests(&self) { - let _ = self.sync_service.clear_justification_requests(); - } -} - impl NetworkStateInfo for NetworkService where B: sp_runtime::traits::Block, @@ -762,22 +708,6 @@ where } } -impl NetworkSyncForkRequest> for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. 
- /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - self.sync_service.set_sync_fork_request(peers, hash, number); - } -} - #[async_trait::async_trait] impl NetworkStatusProvider for NetworkService where @@ -1083,20 +1013,6 @@ where } } -impl NetworkBlock> for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.sync_service.announce_block(hash, data); - } - - fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self.sync_service.new_best_block_imported(hash, number); - } -} - /// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. #[must_use] pub struct NotificationSender { diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index bbc6db93c21f6..71eaebe7e512b 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -255,7 +255,7 @@ where /// Returns true if we're major syncing. pub fn is_major_syncing(&self) -> bool { - self.network.service().is_major_syncing() + self.sync_service.is_major_syncing() } // Returns a clone of the local SelectChain, only available on full nodes @@ -282,12 +282,12 @@ where /// Request a justification for the given block. pub fn request_justification(&self, hash: &::Hash, number: NumberFor) { - self.network.service().request_justification(hash, number); + self.sync_service.request_justification(hash, number); } /// Announces an important block on the network. pub fn announce_block(&self, hash: ::Hash, data: Option>) { - self.network.service().announce_block(hash, data); + self.sync_service.announce_block(hash, data); } /// Request explicit fork sync. 
@@ -297,7 +297,7 @@ where hash: ::Hash, number: NumberFor, ) { - self.network.service().set_sync_fork_request(peers, hash, number); + self.sync_service.set_sync_fork_request(peers, hash, number); } /// Add blocks to the peer -- edit the block before adding @@ -393,7 +393,7 @@ where futures::executor::block_on(self.block_import.import_block(import_block, cache)) .expect("block_import failed"); if announce_block { - self.network.service().announce_block(hash, None); + self.sync_service.announce_block(hash, None); } at = hash; } @@ -1128,7 +1128,7 @@ where while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { - peer.network.service().announce_block(notification.hash, None); + peer.sync_service.announce_block(notification.hash, None); } // We poll `finality_notification_stream`. diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c7f345704a364..aff620b6d890c 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -786,7 +786,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { assert!(!net.peer(1).has_block(block_hash)); // Make sync protocol aware of the best block - net.peer(0).network_service().new_best_block_imported(block_hash, 3); + net.peer(0).sync_service().new_best_block_imported(block_hash, 3); net.block_until_idle(); // Connect another node that should now sync to the tip diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index bb9e5531176ca..21c47009b7246 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -161,16 +161,17 @@ impl TransactionsHandlerPrototype { pub fn build< B: BlockT + 'static, H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, >( self, - service: S, - 
sync_service: Arc, + network: N, + sync: S, transaction_pool: Arc>, metrics_registry: Option<&Registry>, - ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let net_event_stream = service.event_stream("transactions-handler-net"); - let sync_event_stream = sync_service.event_stream("transactions-handler-sync"); + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + let net_event_stream = network.event_stream("transactions-handler-net"); + let sync_event_stream = sync.event_stream("transactions-handler-sync"); let (to_handler, from_controller) = mpsc::unbounded(); let handler = TransactionsHandler { @@ -178,7 +179,8 @@ impl TransactionsHandlerPrototype { propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), - service, + network, + sync, net_event_stream, sync_event_stream, peers: HashMap::new(), @@ -229,7 +231,8 @@ enum ToHandler { pub struct TransactionsHandler< B: BlockT + 'static, H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, > { protocol_name: ProtocolName, /// Interval at which we call `propagate_transactions`. @@ -242,7 +245,9 @@ pub struct TransactionsHandler< /// multiple times concurrently. pending_transactions_peers: HashMap>, /// Network service to use to send messages and manage peers. - service: S, + network: N, + /// Syncing service. + sync: S, /// Stream of networking events. net_event_stream: Pin + Send>>, /// Receiver for syncing-related events. 
@@ -263,11 +268,12 @@ struct Peer { role: ObservedRole, } -impl TransactionsHandler +impl TransactionsHandler where B: BlockT + 'static, H: ExHashT, - S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, { /// Turns the [`TransactionsHandler`] into a future that should run forever and not be /// interrupted. @@ -315,7 +321,7 @@ where SyncEvent::PeerConnected(remote) => { let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) .collect::(); - let result = self.service.add_peers_to_reserved_set( + let result = self.network.add_peers_to_reserved_set( self.protocol_name.clone(), iter::once(addr).collect(), ); @@ -324,7 +330,7 @@ where } }, SyncEvent::PeerDisconnected(remote) => { - self.service.remove_peers_from_reserved_set( + self.network.remove_peers_from_reserved_set( self.protocol_name.clone(), iter::once(remote).collect(), ); @@ -380,7 +386,7 @@ where /// Called when peer sends us new transactions fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { + if self.sync.is_major_syncing() { trace!(target: "sync", "{} Ignoring transactions while major syncing", who); return } @@ -400,7 +406,7 @@ where let hash = self.transaction_pool.hash_of(&t); peer.known_transactions.insert(hash.clone()); - self.service.report_peer(who, rep::ANY_TRANSACTION); + self.network.report_peer(who, rep::ANY_TRANSACTION); match self.pending_transactions_peers.entry(hash.clone()) { Entry::Vacant(entry) => { @@ -421,9 +427,9 @@ where fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { match import { TransactionImport::KnownGood => - self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), - TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), - 
TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION), + self.network.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::NewGood => self.network.report_peer(who, rep::GOOD_TRANSACTION), + TransactionImport::Bad => self.network.report_peer(who, rep::BAD_TRANSACTION), TransactionImport::None => {}, } } @@ -431,7 +437,7 @@ where /// Propagate one transaction. pub fn propagate_transaction(&mut self, hash: &H) { // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { + if self.sync.is_major_syncing() { return } @@ -468,7 +474,7 @@ where propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.service + self.network .write_notification(*who, self.protocol_name.clone(), to_send.encode()); } } @@ -483,7 +489,7 @@ where /// Call when we must propagate ready transactions to peers. fn propagate_transactions(&mut self) { // Accept transactions only when node is not major syncing - if self.service.is_major_syncing() { + if self.sync.is_major_syncing() { return } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4de71a3d31c22..e34ba82f4a9ec 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -48,7 +48,7 @@ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, engine::SyncingEngine, service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler, - warp_request_handler::RequestHandler as WarpSyncRequestHandler, + warp_request_handler::RequestHandler as WarpSyncRequestHandler, SyncingService, }; use sc_rpc::{ author::AuthorApiServer, @@ -368,7 +368,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub tx_handler_controller: sc_network_transactions::TransactionsHandlerController<::Hash>, /// Syncing service. 
- pub sync_service: Arc>, + pub sync_service: Arc>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, } @@ -746,7 +746,7 @@ pub fn build_network( TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, - Arc>, + Arc>, ), Error, > diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index a21ad7b4dc4ab..3b0c5bca39902 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -45,6 +45,7 @@ use sc_network::PeerId; use sc_network_common::{ config::MultiaddrWithPeerId, service::NetworkBlock, sync::ChainSyncService, }; +use sc_network_sync::SyncingService; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -156,7 +157,7 @@ async fn build_network_future< mut network: sc_network::NetworkWorker, client: Arc, mut rpc_rx: TracingUnboundedReceiver>, - sync_service: Arc>, + sync_service: Arc>, should_have_peers: bool, announce_imported_blocks: bool, ) { @@ -180,11 +181,11 @@ async fn build_network_future< }; if announce_imported_blocks { - network.service().announce_block(notification.hash, None); + sync_service.announce_block(notification.hash, None); } if notification.is_new_best { - network.service().new_best_block_imported( + sync_service.new_best_block_imported( notification.hash, *notification.header.number(), ); @@ -202,7 +203,7 @@ async fn build_network_future< sc_rpc::system::Request::Health(sender) => { let _ = sender.send(sc_rpc::system::Health { peers: sync_service.peers_info().await.expect("syncing to stay active").len(), - is_syncing: network.service().is_major_syncing(), + is_syncing: sync_service.is_major_syncing(), should_have_peers, }); }, diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index b2011c05e8235..c3f2ec30e4e0e 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -27,6 +27,7 @@ sc-consensus = { version = "0.10.0-dev", path 
= "../../../client/consensus/commo sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } +sc-network-sync = { version = "0.10.0-dev", path = "../../network/sync" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 5d29d34a3cbf2..df4b3b972204d 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -27,6 +27,7 @@ use sc_network_common::{ config::{MultiaddrWithPeerId, TransportConfig}, service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; +use sc_network_sync::SyncingService; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, @@ -79,6 +80,7 @@ pub trait TestNetNode: fn network( &self, ) -> Arc::Hash>>; + fn sync(&self) -> &Arc>; fn spawn_handle(&self) -> SpawnTaskHandle; } @@ -87,6 +89,7 @@ pub struct TestNetComponents { client: Arc>, transaction_pool: Arc, network: Arc::Hash>>, + sync: Arc>, } impl @@ -96,9 +99,16 @@ impl task_manager: TaskManager, client: Arc>, network: Arc::Hash>>, + sync: Arc>, transaction_pool: Arc, ) -> Self { - Self { client, transaction_pool, network, task_manager: Arc::new(Mutex::new(task_manager)) } + Self { + client, + sync, + transaction_pool, + network, + task_manager: Arc::new(Mutex::new(task_manager)), + } } } @@ -111,6 +121,7 @@ impl Clone client: self.client.clone(), transaction_pool: self.transaction_pool.clone(), network: self.network.clone(), + sync: self.sync.clone(), } } } @@ -151,6 +162,9 @@ where ) -> Arc::Hash>> { self.network.clone() } + fn sync(&self) -> &Arc> { + &self.sync + } fn spawn_handle(&self) -> 
SpawnTaskHandle { self.task_manager.lock().spawn_handle() } @@ -470,7 +484,7 @@ pub fn sync( let info = network.full_nodes[0].1.client().info(); network.full_nodes[0] .1 - .network() + .sync() .new_best_block_imported(info.best_hash, info.best_number); network.full_nodes[0].3.clone() }; From e5d6c497eb4ed1a483fd409c5509cf0ded93efe7 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 29 Nov 2022 10:41:06 +0200 Subject: [PATCH 17/30] Remove `ChainSyncService` --- client/network/Cargo.toml | 1 - client/network/common/src/sync.rs | 29 ----- client/network/src/config.rs | 9 +- client/network/src/lib.rs | 34 +---- client/network/src/service.rs | 12 +- client/network/src/service/tests/mod.rs | 8 +- client/network/sync/src/service/chain_sync.rs | 116 +++++++++--------- client/network/test/src/lib.rs | 4 +- client/service/src/builder.rs | 3 +- client/service/src/lib.rs | 4 +- 10 files changed, 72 insertions(+), 148 deletions(-) diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index d468f3b2c98f5..afd9880148081 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -47,7 +47,6 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "./common" } -sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-arithmetic = { version = "6.0.0", path = "../../primitives/arithmetic" } diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 959a88078a2c8..f9ff041b19f5f 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -467,32 +467,3 @@ pub trait ChainSync: Send { /// Send block request to peer fn send_block_request(&mut self, who: PeerId, request: 
BlockRequest); } - -#[async_trait::async_trait] -pub trait ChainSyncService: Send + Sync { - /// Returns the number of peers we're connected to and that are being queried. - async fn num_active_peers(&self) -> Result; - - /// Target sync block number. - async fn best_seen_block(&self) -> Result>, oneshot::Canceled>; - - /// Number of peers participating in syncing. - async fn num_sync_peers(&self) -> Result; - - /// Number of blocks in the import queue. - async fn num_queued_blocks(&self) -> Result; - - /// Number of downloaded blocks. - async fn num_downloaded_blocks(&self) -> Result; - - /// Number of active sync requests. - async fn num_sync_requests(&self) -> Result; - - /// Returns information about all the peers we are connected to after the handshake message. - async fn peers_info(&self) - -> Result)>, oneshot::Canceled>; - - /// Call this when a block has been finalized. The sync layer may have some additional - /// requesting to perform. - fn on_block_finalized(&self, hash: Block::Hash, header: Block::Header); -} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index cf4583f5e97aa..f3bfc14f931a8 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -56,10 +56,7 @@ use std::{ use zeroize::Zeroize; /// Network initialization parameters. -pub struct Params -where - B: BlockT + 'static, -{ +pub struct Params { /// Assigned role for our node (full, light, ...). pub role: Role, @@ -80,10 +77,6 @@ where /// name on the wire. pub fork_id: Option, - // TODO(aaro): remove this - /// Interface that can be used to delegate syncing-related function calls to `ChainSync` - pub sync_service: Arc>, - /// Registry for recording prometheus metrics to. 
pub metrics_registry: Option, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 7c23faa95369f..86602a5d19e91 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -272,8 +272,7 @@ pub use sc_network_common::{ }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, - ChainSyncService, ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, - SyncStatusProvider, + ExtendedPeerInfo, StateDownloadProgress, SyncEventStream, SyncState, SyncStatusProvider, }, }; pub use service::{ @@ -296,34 +295,3 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; - -/// Abstraction over syncing-related services -pub trait ChainSyncInterface: - NetworkSyncForkRequest> - + JustificationSyncLink - + Link - + NetworkBlock> - + SyncStatusProvider - + SyncEventStream - + ChainSyncService - + SyncOracle - + Send - + Sync - + 'static -{ -} - -impl ChainSyncInterface for T where - T: NetworkSyncForkRequest> - + JustificationSyncLink - + Link - + NetworkBlock> - + SyncStatusProvider - + SyncEventStream - + ChainSyncService - + SyncOracle - + Send - + Sync - + 'static -{ -} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 4c9cd1704580e..710d71c38aeb9 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -117,9 +117,6 @@ pub struct NetworkService { peerset: PeersetHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender, - // TODO(aaro): remove this - /// Interface that can be used to delegate calls to `ChainSync` - sync_service: Arc>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, @@ -129,6 +126,8 @@ pub struct NetworkService { /// Marker to pin the `H` generic. 
Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, + /// Marker for block type + _block: PhantomData, } impl NetworkWorker @@ -148,7 +147,7 @@ where /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -434,12 +433,12 @@ where local_peer_id, local_identity, to_worker, - sync_service: params.sync_service, peers_notifications_sinks: peers_notifications_sinks.clone(), notifications_sizes_metric: metrics .as_ref() .map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, + _block: Default::default(), }); Ok(NetworkWorker { @@ -453,6 +452,7 @@ where metrics, boot_node_ids, _marker: Default::default(), + _block: Default::default(), }) } @@ -1152,6 +1152,8 @@ where /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, + /// Marker for block type + _block: PhantomData, } impl Future for NetworkWorker diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 9c8e37d273040..f22dc167f4401 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; +use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::Multiaddr; @@ -33,6 +33,7 @@ use sc_network_sync::{ engine::SyncingEngine, service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, + SyncingService, }; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{collections::HashSet, sync::Arc}; @@ -91,7 +92,7 @@ struct TestNetworkBuilder { client: Option>, listen_addresses: Vec, set_config: Option, - chain_sync: Option<(Box>, Box>)>, + chain_sync: Option<(Box>, Box>)>, chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>, config: Option, } @@ -132,7 +133,7 @@ impl TestNetworkBuilder { pub fn with_chain_sync( mut self, - chain_sync: (Box>, Box>), + chain_sync: (Box>, Box>), ) -> Self { self.chain_sync = Some(chain_sync); self @@ -287,7 +288,6 @@ impl TestNetworkBuilder { chain: client.clone(), protocol_id, fork_id, - sync_service: Arc::new(chain_sync_service), metrics_registry: None, request_response_protocol_configs: [ block_request_protocol_config, diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 7539c9ce41d07..bd28b59a53b53 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -22,10 +22,7 @@ use libp2p::PeerId; use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link}; use sc_network_common::{ service::{NetworkBlock, NetworkSyncForkRequest}, - sync::{ - ChainSyncService, ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, - SyncStatusProvider, - }, + sync::{ExtendedPeerInfo, SyncEvent, SyncEventStream, SyncStatus, SyncStatusProvider}, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -83,6 +80,61 @@ impl SyncingService { ) -> Self { Self { tx, 
num_connected, is_major_syncing } } + + pub async fn num_active_peers(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx)); + + rx.await + } + + pub async fn best_seen_block(&self) -> Result>, oneshot::Canceled> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::BestSeenBlock(tx)); + + rx.await + } + + pub async fn num_sync_peers(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncPeers(tx)); + + rx.await + } + + pub async fn num_queued_blocks(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumQueuedBlocks(tx)); + + rx.await + } + + pub async fn num_downloaded_blocks(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumDownloadedBlocks(tx)); + + rx.await + } + + pub async fn num_sync_requests(&self) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncRequests(tx)); + + rx.await + } + + pub async fn peers_info( + &self, + ) -> Result)>, oneshot::Canceled> { + let (tx, rx) = oneshot::channel(); + let _ = self.tx.unbounded_send(ToServiceCommand::PeersInfo(tx)); + + rx.await + } + + pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { + let _ = self.tx.unbounded_send(ToServiceCommand::OnBlockFinalized(hash, header)); + } } impl NetworkSyncForkRequest> for SyncingService { @@ -174,62 +226,6 @@ impl NetworkBlock> for SyncingService { } } -#[async_trait::async_trait] -impl ChainSyncService for SyncingService { - async fn num_active_peers(&self) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx)); - - rx.await - } - - async fn best_seen_block(&self) -> Result>, oneshot::Canceled> { - let (tx, rx) = oneshot::channel(); - let _ = 
self.tx.unbounded_send(ToServiceCommand::BestSeenBlock(tx)); - - rx.await - } - - async fn num_sync_peers(&self) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncPeers(tx)); - - rx.await - } - - async fn num_queued_blocks(&self) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::NumQueuedBlocks(tx)); - - rx.await - } - - async fn num_downloaded_blocks(&self) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::NumDownloadedBlocks(tx)); - - rx.await - } - - async fn num_sync_requests(&self) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncRequests(tx)); - - rx.await - } - - async fn peers_info(&self) -> Result)>, oneshot::Canceled> { - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send(ToServiceCommand::PeersInfo(tx)); - - rx.await - } - - fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { - let _ = self.tx.unbounded_send(ToServiceCommand::OnBlockFinalized(hash, header)); - } -} - impl sp_consensus::SyncOracle for SyncingService { fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 71eaebe7e512b..df403866aa5ea 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -50,7 +50,7 @@ use sc_consensus::{ }; use sc_network::{ config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - ChainSyncService, Multiaddr, NetworkService, NetworkWorker, + Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ config::{ @@ -956,7 +956,6 @@ where chain: client.clone(), protocol_id, fork_id, - sync_service: sync_service.clone(), metrics_registry: None, block_announce_config, request_response_protocol_configs: [ @@ -1135,7 +1134,6 @@ where while let Poll::Ready(Some(notification)) 
= peer.finality_notification_stream.as_mut().poll_next(cx) { - use sc_network::ChainSyncService; peer.sync_service.on_block_finalized(notification.hash, notification.header); } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index e34ba82f4a9ec..adf3185432574 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,7 +37,7 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{config::SyncMode, ChainSyncInterface, NetworkService}; +use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ protocol::role::Roles, @@ -942,7 +942,6 @@ where chain: client.clone(), protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - sync_service: sync_service.clone(), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, request_response_protocol_configs: request_response_protocol_configs diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 3b0c5bca39902..dd0b530dd341e 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,9 +42,7 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; -use sc_network_common::{ - config::MultiaddrWithPeerId, service::NetworkBlock, sync::ChainSyncService, -}; +use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; use sc_network_sync::SyncingService; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; From a4f54031146b6bbaae5fe4eedc05e7d06ceb27ba Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 29 Nov 2022 11:00:22 +0200 Subject: [PATCH 18/30] Remove `ChainSync` 
service tests They were written for the sole purpose of verifying that `NetworWorker` continues to function while the calls are being dispatched to `ChainSync`. --- .../network/src/service/tests/chain_sync.rs | 447 ------------------ client/network/src/service/tests/mod.rs | 8 +- client/network/src/service/tests/service.rs | 37 -- 3 files changed, 3 insertions(+), 489 deletions(-) delete mode 100644 client/network/src/service/tests/chain_sync.rs diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs deleted file mode 100644 index 52ec72dfc7ede..0000000000000 --- a/client/network/src/service/tests/chain_sync.rs +++ /dev/null @@ -1,447 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -/* -use crate::{ - config, - service::tests::{TestNetworkBuilder, BLOCK_ANNOUNCE_PROTO_NAME}, -}; - -use futures::prelude::*; -use libp2p::PeerId; -use sc_block_builder::BlockBuilderProvider; -use sc_client_api::HeaderBackend; -use sc_consensus::JustificationSyncLink; -use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId, SetConfig}, - protocol::{event::Event, role::Roles, ProtocolName}, - service::NetworkSyncForkRequest, - sync::{SyncState, SyncStatus}, -}; -use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface, ChainSync}; -use sp_core::H256; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as _}, -}; -use std::{ - sync::{Arc, RwLock}, - task::Poll, - time::Duration, -}; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; - -fn set_default_expecations_no_peers( - chain_sync: &mut MockChainSync, -) { - chain_sync.expect_poll().returning(|_| Poll::Pending); - chain_sync.expect_status().returning(|| SyncStatus { - state: SyncState::Idle, - best_seen_block: None, - num_peers: 0u32, - queued_blocks: 0u32, - state_sync: None, - warp_sync: None, - }); -} - -#[async_std::test] -async fn normal_network_poll_no_peers() { - // build `ChainSync` and set default expectations for it - let mut chain_sync = - Box::new(MockChainSync::::new()); - set_default_expecations_no_peers(&mut chain_sync); - - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // poll the network once - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn request_justification() { - let hash = H256::random(); - let number = 1337u64; - - // build `ChainSyncInterface` provider and and 
expect - // `JustificationSyncLink::request_justification() to be called once - let mut chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - chain_sync_service - .expect_justification_sync_link_request_justification() - .withf(move |in_hash, in_number| &hash == in_hash && &number == in_number) - .once() - .returning(|_, _| ()); - - // build `ChainSync` and set default expecations for it - let mut chain_sync = MockChainSync::::new(); - - set_default_expecations_no_peers(&mut chain_sync); - let mut network = TestNetworkBuilder::new() - .with_chain_sync((Box::new(chain_sync), chain_sync_service)) - .build(); - - // send "request justifiction" message and poll the network - network.service().request_justification(&hash, number); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn clear_justification_requests() { - // build `ChainSyncInterface` provider and expect - // `JustificationSyncLink::clear_justification_requests()` to be called - let mut chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - chain_sync_service - .expect_justification_sync_link_clear_justification_requests() - .once() - .returning(|| ()); - - // build `ChainSync` and set default expecations for it - let mut chain_sync = - Box::new(MockChainSync::::new()); - - set_default_expecations_no_peers(&mut chain_sync); - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // send "request justifiction" message and poll the network - network.service().clear_justification_requests(); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn set_sync_fork_request() { - // build `ChainSync` and set default expectations for it - let mut chain_sync = - Box::new(MockChainSync::::new()); - set_default_expecations_no_peers(&mut chain_sync); 
- - // build `ChainSyncInterface` provider and verify that the `set_sync_fork_request()` - // call is delegated to `ChainSyncInterface` (which eventually forwards it to `ChainSync`) - let mut chain_sync_service = - MockChainSyncInterface::::new(); - - let hash = H256::random(); - let number = 1337u64; - let peers = (0..3).map(|_| PeerId::random()).collect::>(); - let copy_peers = peers.clone(); - - chain_sync_service - .expect_set_sync_fork_request() - .withf(move |in_peers, in_hash, in_number| { - &peers == in_peers && &hash == in_hash && &number == in_number - }) - .once() - .returning(|_, _, _| ()); - - let mut network = TestNetworkBuilder::new() - .with_chain_sync((chain_sync, Box::new(chain_sync_service))) - .build(); - - // send "set sync fork request" message and poll the network - network.service().set_sync_fork_request(copy_peers, hash, number); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -#[async_std::test] -async fn on_block_finalized() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be - // called) - let chain_sync_service = - Box::new(MockChainSyncInterface::::new()); - - // build `ChainSync` and verify that call to `on_block_finalized()` is made - let mut chain_sync = - Box::new(MockChainSync::::new()); - - let at = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap().hash(); - let block = client - .new_block_at(&BlockId::Hash(at), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - let header = block.header.clone(); - let block_number = *header.number(); - let hash = block.hash(); - - chain_sync - .expect_on_block_finalized() - .withf(move |in_hash, in_number| &hash == in_hash && &block_number == in_number) - .once() - .returning(|_, _| ()); - - set_default_expecations_no_peers(&mut chain_sync); - 
let mut network = TestNetworkBuilder::new() - .with_client(client) - .with_chain_sync((chain_sync, chain_sync_service)) - .build(); - - // send "set sync fork request" message and poll the network - network.network().on_block_finalized(hash, header); - - futures::future::poll_fn(|cx| { - let _ = network.network().poll_unpin(cx); - Poll::Ready(()) - }) - .await; -} - -// report from mock import queue that importing a justification was not successful -// and verify that connection to the peer is closed -#[async_std::test] -async fn invalid_justification_imported() { - struct DummyImportQueueHandle; - - impl - sc_consensus::import_queue::ImportQueueService< - substrate_test_runtime_client::runtime::Block, - > for DummyImportQueueHandle - { - fn import_blocks( - &mut self, - _origin: sp_consensus::BlockOrigin, - _blocks: Vec< - sc_consensus::IncomingBlock, - >, - ) { - } - - fn import_justifications( - &mut self, - _who: sc_consensus::import_queue::RuntimeOrigin, - _hash: substrate_test_runtime_client::runtime::Hash, - _number: sp_runtime::traits::NumberFor, - _justifications: sp_runtime::Justifications, - ) { - } - } - - struct DummyImportQueue( - Arc< - RwLock< - Option<( - PeerId, - substrate_test_runtime_client::runtime::Hash, - sp_runtime::traits::NumberFor, - )>, - >, - >, - DummyImportQueueHandle, - ); - - #[async_trait::async_trait] - impl sc_consensus::ImportQueue for DummyImportQueue { - fn poll_actions( - &mut self, - _cx: &mut futures::task::Context, - link: &mut dyn sc_consensus::Link, - ) { - if let Some((peer, hash, number)) = *self.0.read().unwrap() { - link.justification_imported(peer, &hash, number, false); - } - } - - fn service( - &self, - ) -> Box< - dyn sc_consensus::import_queue::ImportQueueService< - substrate_test_runtime_client::runtime::Block, - >, - > { - Box::new(DummyImportQueueHandle {}) - } - - fn service_ref( - &mut self, - ) -> &mut dyn sc_consensus::import_queue::ImportQueueService< - substrate_test_runtime_client::runtime::Block, - > 
{ - &mut self.1 - } - - async fn run( - self, - _link: Box>, - ) { - } - } - - let justification_info = Arc::new(RwLock::new(None)); - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (service1, mut event_stream1) = TestNetworkBuilder::new() - .with_import_queue(Box::new(DummyImportQueue( - justification_info.clone(), - DummyImportQueueHandle {}, - ))) - .with_listen_addresses(vec![listen_addr.clone()]) - .build() - .start_network(); - - let (service2, mut event_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: service1.local_peer_id, - }], - ..Default::default() - }) - .build() - .start_network(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - while !notif_received || !sync_received { - match stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. } => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut event_stream1).await; - wait_for_events(&mut event_stream2).await; - - { - let mut info = justification_info.write().unwrap(); - *info = Some((service2.local_peer_id, H256::random(), 1337u64)); - } - - let wait_disconnection = async { - while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. 
})) {} - }; - - if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) - .await - .is_err() - { - panic!("did not receive disconnection event in time"); - } -} - -#[async_std::test] -async fn disconnect_peer_using_chain_sync_handle() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); - let (chain_sync_network_provider, chain_sync_network_handle) = - sc_network_sync::service::network::NetworkServiceProvider::new(); - let handle_clone = chain_sync_network_handle.clone(); - - let (chain_sync, chain_sync_service, _) = ChainSync::new( - sc_network_common::sync::SyncMode::Full, - client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&config::Role::Full), - Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), - 1u32, - None, - None, - chain_sync_network_handle.clone(), - import_queue, - ProtocolName::from("block-request"), - ProtocolName::from("state-request"), - None, - ) - .unwrap(); - - let (node1, mut event_stream1) = TestNetworkBuilder::new() - .with_listen_addresses(vec![listen_addr.clone()]) - .with_chain_sync((Box::new(chain_sync), Box::new(chain_sync_service))) - .with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle)) - .with_client(client.clone()) - .build() - .start_network(); - - let (node2, mut event_stream2) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id, - }], - ..Default::default() - }) - .with_client(client.clone()) - .build() - .start_network(); - - async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { - let mut notif_received = false; - let mut sync_received = false; - while 
!notif_received || !sync_received { - match stream.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => notif_received = true, - Event::SyncConnected { .. } => sync_received = true, - _ => {}, - }; - } - } - - wait_for_events(&mut event_stream1).await; - wait_for_events(&mut event_stream2).await; - - handle_clone.disconnect_peer(node2.local_peer_id, BLOCK_ANNOUNCE_PROTO_NAME.into()); - - let wait_disconnection = async { - while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {} - }; - - if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) - .await - .is_err() - { - panic!("did not receive disconnection event in time"); - } -} -*/ diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index f22dc167f4401..be8c0d622478b 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -42,8 +42,6 @@ use substrate_test_runtime_client::{ TestClient, TestClientBuilder, TestClientBuilderExt as _, }; -#[cfg(test)] -mod chain_sync; #[cfg(test)] mod service; @@ -131,7 +129,7 @@ impl TestNetworkBuilder { self } - pub fn with_chain_sync( + pub fn _with_chain_sync( mut self, chain_sync: (Box>, Box>), ) -> Self { @@ -139,7 +137,7 @@ impl TestNetworkBuilder { self } - pub fn with_chain_sync_network( + pub fn _with_chain_sync_network( mut self, chain_sync_network: (NetworkServiceProvider, NetworkServiceHandle), ) -> Self { @@ -147,7 +145,7 @@ impl TestNetworkBuilder { self } - pub fn with_import_queue(mut self, import_queue: Box>) -> Self { + pub fn _with_import_queue(mut self, import_queue: Box>) -> Self { self.import_queue = Some(import_queue); self } diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 1ff930de8d73c..dd79eaf365df0 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -418,43 +418,6 @@ fn 
fallback_name_working() { }); } -// TODO(aaro): fix this test, how though? -// // Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement -// // protocol name and verify that `SyncDisconnected` event is emitted -// #[async_std::test] -// async fn disconnect_sync_peer_using_block_announcement_protocol_name() { -// let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); - -// async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { -// let mut notif_received = false; -// let mut sync_received = false; - -// while !notif_received || !sync_received { -// match stream.next().await.unwrap() { -// Event::NotificationStreamOpened { .. } => notif_received = true, -// Event::SyncConnected { .. } => sync_received = true, -// _ => {}, -// }; -// } -// } - -// wait_for_events(&mut events_stream1).await; -// wait_for_events(&mut events_stream2).await; - -// // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted -// node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); -// assert!(std::matches!( -// events_stream2.next().await, -// Some(Event::NotificationStreamClosed { .. }) -// )); -// let _ = events_stream2.next().await; // ignore the reopen event - -// // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is -// // emitted -// node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); -// assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. 
}))); -// } - #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { From 2acb77524a1aa52ecbeb4cef696aabd47df2d042 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Tue, 29 Nov 2022 15:05:38 +0200 Subject: [PATCH 19/30] Refactor code --- Cargo.lock | 4 +- bin/node-template/node/src/service.rs | 1 - client/beefy/src/lib.rs | 2 +- client/cli/src/arg_enums.rs | 20 +- client/cli/src/params/network_params.rs | 7 +- client/cli/src/params/node_key_params.rs | 14 +- .../src/communication/tests.rs | 3 + client/finality-grandpa/src/lib.rs | 2 +- client/finality-grandpa/src/observer.rs | 1 - client/network-gossip/src/bridge.rs | 23 +- client/network/Cargo.toml | 1 - client/network/common/Cargo.toml | 5 + client/network/common/src/config.rs | 375 +++++++++++++++++- client/network/common/src/protocol/event.rs | 21 - client/network/common/src/sync.rs | 2 +- client/network/src/behaviour.rs | 33 +- client/network/src/config.rs | 371 +---------------- client/network/src/lib.rs | 3 - client/network/src/protocol.rs | 86 ++-- client/network/src/service.rs | 34 +- client/network/src/service/out_events.rs | 12 - client/network/src/service/tests/mod.rs | 57 +-- client/network/src/service/tests/service.rs | 153 +++---- client/network/sync/src/engine.rs | 90 +++-- client/network/test/src/lib.rs | 62 +-- client/network/transactions/src/lib.rs | 2 +- client/service/src/builder.rs | 64 +-- client/service/src/config.rs | 7 +- 28 files changed, 639 insertions(+), 816 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4af705fe8b560..6436cb91ea367 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8221,7 +8221,6 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", - "tempfile", "thiserror", "unsigned-varint", "zeroize", @@ -8257,6 +8256,7 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ + "array-bytes", "async-trait", "bitflags", 
"bytes", @@ -8276,7 +8276,9 @@ dependencies = [ "sp-finality-grandpa", "sp-runtime", "substrate-prometheus-endpoint", + "tempfile", "thiserror", + "zeroize", ] [[package]] diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index e8086e6ef7f39..2a5424e7d28a4 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -321,7 +321,6 @@ pub fn new_full(mut config: Configuration) -> Result config: grandpa_config, link: grandpa_link, network, - // TODO(aaro): fix arc sync: Arc::new(sync_service), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 9e1a6777362d9..1a6f726cdaa96 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -42,7 +42,7 @@ use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; use sc_consensus::BlockImport; use sc_network::ProtocolName; -use sc_network_common::{service::NetworkRequest, sync::SyncEventStream}; +use sc_network_common::service::NetworkRequest; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi}; use sp_blockchain::{ diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 20f68bc7fb55e..a0fef3e0f3661 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -250,15 +250,19 @@ pub enum SyncMode { Warp, } -impl Into for SyncMode { - fn into(self) -> sc_network::config::SyncMode { +impl Into for SyncMode { + fn into(self) -> sc_network_common::config::SyncMode { match self { - SyncMode::Full => sc_network::config::SyncMode::Full, - SyncMode::Fast => - sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, - SyncMode::FastUnsafe => - sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false 
}, - SyncMode::Warp => sc_network::config::SyncMode::Warp, + SyncMode::Full => sc_network_common::config::SyncMode::Full, + SyncMode::Fast => sc_network_common::config::SyncMode::Fast { + skip_proofs: false, + storage_chain_mode: false, + }, + SyncMode::FastUnsafe => sc_network_common::config::SyncMode::Fast { + skip_proofs: true, + storage_chain_mode: false, + }, + SyncMode::Warp => sc_network_common::config::SyncMode::Warp, } } } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 5580dea45bde6..8f8126f9bf052 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -18,11 +18,8 @@ use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use clap::Args; -use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig}, - multiaddr::Protocol, -}; -use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig}; +use sc_network::{config::NetworkConfiguration, multiaddr::Protocol}; +use sc_network_common::config::{NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig}; use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 2346455c26a37..8955708678cc5 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use clap::Args; -use sc_network::config::{identity::ed25519, NodeKeyConfig}; +use sc_network_common::config::{identity::ed25519, NodeKeyConfig}; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; @@ -92,7 +92,7 @@ impl NodeKeyParams { let secret = if let Some(node_key) = self.node_key.as_ref() { parse_ed25519_secret(node_key)? 
} else { - sc_network::config::Secret::File( + sc_network_common::config::Secret::File( self.node_key_file .clone() .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)), @@ -111,10 +111,10 @@ fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { } /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. -fn parse_ed25519_secret(hex: &str) -> error::Result { +fn parse_ed25519_secret(hex: &str) -> error::Result { H256::from_str(hex).map_err(invalid_node_key).and_then(|bytes| { ed25519::SecretKey::from_bytes(bytes) - .map(sc_network::config::Secret::Input) + .map(sc_network_common::config::Secret::Input) .map_err(invalid_node_key) }) } @@ -123,7 +123,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), @@ -200,7 +200,7 @@ mod tests { let dir = PathBuf::from(net_config_dir.clone()); let typ = params.node_key_type; params.node_key(net_config_dir).and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + NodeKeyConfig::Ed25519(sc_network_common::config::Secret::File(ref f)) if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 3544725de4397..ee679eec85d97 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -403,6 +403,7 @@ fn good_commit_leads_to_relay() { protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, + received_handshake: vec![], }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { @@ -420,6 +421,7 @@ fn good_commit_leads_to_relay() { protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, + received_handshake: 
vec![], }); // Announce its local set has being on the current set id through a neighbor @@ -552,6 +554,7 @@ fn bad_commit_leads_to_report() { protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, + received_handshake: vec![], }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 300b71808630e..ea409c4e5b3a1 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -68,7 +68,7 @@ use sc_client_api::{ StorageProvider, TransactionFor, }; use sc_consensus::BlockImport; -use sc_network_common::{protocol::ProtocolName, sync::SyncEventStream}; +use sc_network_common::protocol::ProtocolName; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index f527438e41347..f2ecb8b8b9b17 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -28,7 +28,6 @@ use futures::prelude::*; use log::{debug, info, warn}; use sc_client_api::backend::Backend; -use sc_network_common::sync::SyncEventStream; use sc_telemetry::TelemetryHandle; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index a2fa98e533f57..b294c5ccc1539 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -23,7 +23,7 @@ use crate::{ use sc_network_common::{ protocol::{event::Event, ProtocolName}, - sync::{SyncEvent, SyncEventStream}, + sync::SyncEvent, }; use sc_peerset::ReputationChange; @@ -187,7 +187,6 @@ impl Future for GossipEngine { 'outer: loop { match &mut this.forwarding_state { 
ForwardingState::Idle => { - // TODO(aaro): can this be refactored? let net_event_stream = this.network_event_stream.poll_next_unpin(cx); let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx); @@ -227,7 +226,7 @@ impl Future for GossipEngine { this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) | Event::UncheckedNotificationStreamOpened { .. } => {}, + Event::Dht(_) => {}, }, // The network event stream closed. Do the same for [`GossipValidator`]. Poll::Ready(None) => { @@ -237,20 +236,15 @@ impl Future for GossipEngine { Poll::Pending => {}, } - // TODO(aaro): this is not correct match sync_event_stream { Poll::Ready(Some(event)) => match event { - SyncEvent::PeerConnected(remote) => { - println!("bridge: {remote:?} connected"); - this.network.add_set_reserved(remote, this.protocol.clone()); - }, - SyncEvent::PeerDisconnected(remote) => { - println!("bridge: {remote:?} disconnected"); + SyncEvent::PeerConnected(remote) => + this.network.add_set_reserved(remote, this.protocol.clone()), + SyncEvent::PeerDisconnected(remote) => this.network.remove_peers_from_reserved_set( this.protocol.clone(), vec![remote], - ); - }, + ), }, // The sync event stream closed. Do the same for [`GossipValidator`]. 
Poll::Ready(None) => { @@ -354,6 +348,7 @@ mod tests { NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSender, NotificationSenderError, }, + sync::SyncEventStream, }; use sp_runtime::{ testing::H256, @@ -456,7 +451,7 @@ mod tests { impl NetworkNotification for TestNetwork { fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { - // TODO(aaro): why this must be disabled + unimplemented!(); } fn notification_sender( @@ -589,6 +584,7 @@ mod tests { protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, + received_handshake: vec![], }) .expect("Event stream is unbounded; qed."); @@ -752,6 +748,7 @@ mod tests { protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, + received_handshake: vec![], }) .expect("Event stream is unbounded; qed."); diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index afd9880148081..9ef7fb142bfc4 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -59,7 +59,6 @@ sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } assert_matches = "1.3" async-std = { version = "1.11.0", features = ["attributes"] } rand = "0.7.2" -tempfile = "3.1.0" sc-network-light = { version = "0.10.0-dev", path = "./light" } sc-network-sync = { version = "0.10.0-dev", path = "./sync" } sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index fd6cd4814ff7d..cc4db63fdb052 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.11" [dependencies] +array-bytes = "4.1" async-trait = "0.1.57" bitflags = "1.3.2" bytes = "1" @@ -38,3 +39,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/final sp-runtime = { version = "7.0.0", path = 
"../../../primitives/runtime" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } thiserror = "1.0" +zeroize = "1.4.3" + +[dev-dependencies] +tempfile = "3.1.0" diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 96c7c11ec2696..68c0431792c31 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,11 +18,33 @@ //! Configuration of the networking layer. -use crate::protocol; +pub use crate::{ + protocol::{self, role::Role}, + request_responses::{ + IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, + }, + sync::warp::WarpSyncProvider, + ExHashT, +}; +pub use libp2p::{build_multiaddr, core::PublicKey, identity}; use codec::Encode; -use libp2p::{multiaddr, Multiaddr, PeerId}; -use std::{fmt, str, str::FromStr}; +use libp2p::{ + identity::{ed25519, Keypair}, + multiaddr, Multiaddr, PeerId, +}; +use zeroize::Zeroize; + +use std::{ + error::Error, + fmt, fs, + io::{self, Write}, + iter, + net::Ipv4Addr, + path::{Path, PathBuf}, + str, + str::FromStr, +}; /// Protocol name prefix, transmitted on the wire for legacy protocol names. /// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8. @@ -331,3 +353,350 @@ impl NonReservedPeerMode { } } } + +/// Sync operation mode. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum SyncMode { + /// Full block download and verification. + Full, + /// Download blocks and the latest state. + Fast { + /// Skip state proof download and verification. + skip_proofs: bool, + /// Download indexed transactions for recent blocks. + storage_chain_mode: bool, + }, + /// Warp sync - verify authority set transitions and the latest state. + Warp, +} + +impl SyncMode { + /// Returns if `self` is [`Self::Warp`]. + pub fn is_warp(&self) -> bool { + matches!(self, Self::Warp) + } + + /// Returns if `self` is [`Self::Fast`]. + pub fn is_fast(&self) -> bool { + matches!(self, Self::Fast { .. 
}) + } +} + +impl Default for SyncMode { + fn default() -> Self { + Self::Full + } +} + +/// Network service configuration. +#[derive(Clone, Debug)] +pub struct NetworkConfiguration { + /// Directory path to store network-specific configuration. None means nothing will be saved. + pub net_config_path: Option, + /// Multiaddresses to listen for incoming connections. + pub listen_addresses: Vec, + /// Multiaddresses to advertise. Detected automatically if empty. + pub public_addresses: Vec, + /// List of initial node addresses + pub boot_nodes: Vec, + /// The node key configuration, which determines the node's network identity keypair. + pub node_key: NodeKeyConfig, + /// List of request-response protocols that the node supports. + pub request_response_protocols: Vec, + /// Configuration for the default set of nodes used for block syncing and transactions. + pub default_peers_set: SetConfig, + /// Number of substreams to reserve for full nodes for block syncing and transactions. + /// Any other slot will be dedicated to light nodes. + /// + /// This value is implicitly capped to `default_set.out_peers + default_set.in_peers`. + pub default_peers_set_num_full: u32, + /// Configuration for extra sets of nodes. + pub extra_sets: Vec, + /// Client identifier. Sent over the wire for debugging purposes. + pub client_version: String, + /// Name of the node. Sent over the wire for debugging purposes. + pub node_name: String, + /// Configuration for the transport layer. + pub transport: TransportConfig, + /// Maximum number of peers to ask the same blocks in parallel. + pub max_parallel_downloads: u32, + /// Initial syncing mode. + pub sync_mode: SyncMode, + + /// True if Kademlia random discovery should be enabled. + /// + /// If true, the node will automatically randomly walk the DHT in order to find new peers. + pub enable_dht_random_walk: bool, + + /// Should we insert non-global addresses into the DHT? 
+ pub allow_non_globals_in_dht: bool, + + /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in + /// the presence of potentially adversarial nodes. + pub kademlia_disjoint_query_paths: bool, + /// Enable serving block data over IPFS bitswap. + pub ipfs_server: bool, + + /// Size of Yamux receive window of all substreams. `None` for the default (256kiB). + /// Any value less than 256kiB is invalid. + /// + /// # Context + /// + /// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes + /// to be transferred at a time, where `N` is the Yamux receive window size configurable here. + /// This means, in practice, that every `N` bytes must be acknowledged by the receiver before + /// the sender can send more data. The maximum bandwidth of each notifications substream is + /// therefore `N / round_trip_time`. + /// + /// It is recommended to leave this to `None`, and use a request-response protocol instead if + /// a large amount of data must be transferred. The reason why the value is configurable is + /// that some Substrate users mis-use notification protocols to send large amounts of data. + /// As such, this option isn't designed to stay and will likely get removed in the future. + /// + /// Note that configuring a value here isn't a modification of the Yamux protocol, but rather + /// a modification of the way the implementation works. Different nodes with different + /// configured values remain compatible with each other. 
+ pub yamux_window_size: Option, +} + +impl NetworkConfiguration { + /// Create new default configuration + pub fn new, SV: Into>( + node_name: SN, + client_version: SV, + node_key: NodeKeyConfig, + net_config_path: Option, + ) -> Self { + let default_peers_set = SetConfig::default(); + Self { + net_config_path, + listen_addresses: Vec::new(), + public_addresses: Vec::new(), + boot_nodes: Vec::new(), + node_key, + request_response_protocols: Vec::new(), + default_peers_set_num_full: default_peers_set.in_peers + default_peers_set.out_peers, + default_peers_set, + extra_sets: Vec::new(), + client_version: client_version.into(), + node_name: node_name.into(), + transport: TransportConfig::Normal { enable_mdns: false, allow_private_ipv4: true }, + max_parallel_downloads: 5, + sync_mode: SyncMode::Full, + enable_dht_random_walk: true, + allow_non_globals_in_dht: false, + kademlia_disjoint_query_paths: false, + yamux_window_size: None, + ipfs_server: false, + } + } + + /// Create new default configuration for localhost-only connection with random port (useful for + /// testing) + pub fn new_local() -> NetworkConfiguration { + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect()]; + + config.allow_non_globals_in_dht = true; + config + } + + /// Create new default configuration for localhost-only connection with random port (useful for + /// testing) + pub fn new_memory() -> NetworkConfiguration { + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect()]; + + config.allow_non_globals_in_dht = true; + config + } +} + +/// The configuration of a node's secret 
key, describing the type of key +/// and how it is obtained. A node's identity keypair is the result of +/// the evaluation of the node key configuration. +#[derive(Clone, Debug)] +pub enum NodeKeyConfig { + /// A Ed25519 secret key configuration. + Ed25519(Secret), +} + +impl Default for NodeKeyConfig { + fn default() -> NodeKeyConfig { + Self::Ed25519(Secret::New) + } +} + +/// The options for obtaining a Ed25519 secret key. +pub type Ed25519Secret = Secret; + +/// The configuration options for obtaining a secret key `K`. +#[derive(Clone)] +pub enum Secret { + /// Use the given secret key `K`. + Input(K), + /// Read the secret key from a file. If the file does not exist, + /// it is created with a newly generated secret key `K`. The format + /// of the file is determined by `K`: + /// + /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. + File(PathBuf), + /// Always generate a new secret key `K`. + New, +} + +impl fmt::Debug for Secret { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Input(_) => f.debug_tuple("Secret::Input").finish(), + Self::File(path) => f.debug_tuple("Secret::File").field(path).finish(), + Self::New => f.debug_tuple("Secret::New").finish(), + } + } +} + +impl NodeKeyConfig { + /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: + /// + /// * If the secret is configured as input, the corresponding keypair is returned. + /// + /// * If the secret is configured as a file, it is read from that file, if it exists. Otherwise + /// a new secret is generated and stored. In either case, the keypair obtained from the + /// secret is returned. + /// + /// * If the secret is configured to be new, it is generated and the corresponding keypair is + /// returned. 
+ pub fn into_keypair(self) -> io::Result { + use NodeKeyConfig::*; + match self { + Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + + Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), + + Ed25519(Secret::File(f)) => get_secret( + f, + |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { + if s.len() == 64 { + array_bytes::hex2bytes(&s).ok() + } else { + None + } + }) { + Some(s) => ed25519::SecretKey::from_bytes(s), + _ => ed25519::SecretKey::from_bytes(&mut b), + }, + ed25519::SecretKey::generate, + |b| b.as_ref().to_vec(), + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), + } + } +} + +/// Load a secret key from a file, if it exists, or generate a +/// new secret key and write it to that file. In either case, +/// the secret key is returned. +fn get_secret(file: P, parse: F, generate: G, serialize: W) -> io::Result +where + P: AsRef, + F: for<'r> FnOnce(&'r mut [u8]) -> Result, + G: FnOnce() -> K, + E: Error + Send + Sync + 'static, + W: Fn(&K) -> Vec, +{ + std::fs::read(&file) + .and_then(|mut sk_bytes| { + parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + }) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound { + file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; + let sk = generate(); + let mut sk_vec = serialize(&sk); + write_secret_file(file, &sk_vec)?; + sk_vec.zeroize(); + Ok(sk) + } else { + Err(e) + } + }) +} + +/// Write secret bytes to a file. +fn write_secret_file

(path: P, sk_bytes: &[u8]) -> io::Result<()> +where + P: AsRef, +{ + let mut file = open_secret_file(&path)?; + file.write_all(sk_bytes) +} + +/// Opens a file containing a secret key in write mode. +#[cfg(unix)] +fn open_secret_file

(path: P) -> io::Result +where + P: AsRef, +{ + use std::os::unix::fs::OpenOptionsExt; + fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path) +} + +/// Opens a file containing a secret key in write mode. +#[cfg(not(unix))] +fn open_secret_file

(path: P) -> Result +where + P: AsRef, +{ + fs::OpenOptions::new().write(true).create_new(true).open(path) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn tempdir_with_prefix(prefix: &str) -> TempDir { + tempfile::Builder::new().prefix(prefix).tempdir().unwrap() + } + + fn secret_bytes(kp: &Keypair) -> Vec { + let Keypair::Ed25519(p) = kp; + p.secret().as_ref().iter().cloned().collect() + } + + #[test] + fn test_secret_file() { + let tmp = tempdir_with_prefix("x"); + std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated + let file = tmp.path().join("x").to_path_buf(); + let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); + assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) + } + + #[test] + fn test_secret_input() { + let sk = ed25519::SecretKey::generate(); + let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); + } + + #[test] + fn test_secret_new() { + let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); + } +} diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index 46ca7b684c854..091de2a244cdb 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -67,27 +67,6 @@ pub enum Event { negotiated_fallback: Option, /// Role of the remote. role: ObservedRole, - }, - - /// Opened a substream with the given node with the given notifications protocol. - /// - /// The protocol is always one of the notification protocols that have been registered. 
- /// - /// Protocol must validate the handshake and close the substream if the handshake is invalid. - UncheckedNotificationStreamOpened { - /// Node we opened the substream with. - remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - /// This is always equal to the value of - /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the - /// configured sets. - protocol: ProtocolName, - /// If the negotiation didn't use the main name of the protocol (the one in - /// `notifications_protocol`), then this field contains which name has actually been - /// used. - /// Always contains a value equal to the value in - /// `sc_network::config::NonDefaultSetConfig::fallback_names`. - negotiated_fallback: Option, /// Received handshake. received_handshake: Vec, }, diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index f9ff041b19f5f..956ff9c8bbbde 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -23,7 +23,7 @@ pub mod metrics; pub mod warp; use crate::protocol::role::Roles; -use futures::{channel::oneshot, Stream}; +use futures::Stream; use libp2p::PeerId; diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 9abd965d19d5b..32dacbf5d0d27 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -118,26 +118,6 @@ pub enum BehaviourOut { notifications_sink: NotificationsSink, /// Role of the remote. role: ObservedRole, - }, - - /// Opened a substream with the given node with the given notifications protocol. - /// - /// The protocol is always one of the notification protocols that have been registered. - /// - /// Protocol must validate the received handshake and close the substream if the handshake is - /// invalid. - UncheckedNotificationStreamOpened { - /// Node we opened the substream with. - remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. 
- protocol: ProtocolName, - /// If the negotiation didn't use the main name of the protocol (the one in - /// `notifications_protocol`), then this field contains which name has actually been - /// used. - /// See also [`crate::Event::NotificationStreamOpened`]. - negotiated_fallback: Option, - /// Object that permits sending notifications to the peer. - notifications_sink: NotificationsSink, /// Received handshake. received_handshake: Vec, }, @@ -319,24 +299,13 @@ impl From> for BehaviourOut { protocol, negotiated_fallback, roles, + received_handshake, notifications_sink, } => BehaviourOut::NotificationStreamOpened { remote, protocol, negotiated_fallback, role: reported_roles_to_observed_role(roles), - notifications_sink, - }, - CustomMessageOutcome::UncheckedNotificationStreamOpened { - remote, - protocol, - negotiated_fallback, - received_handshake, - notifications_sink, - } => BehaviourOut::UncheckedNotificationStreamOpened { - remote, - protocol, - negotiated_fallback, received_handshake, notifications_sink, }, diff --git a/client/network/src/config.rs b/client/network/src/config.rs index f3bfc14f931a8..987ad80bb4d75 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -22,7 +22,7 @@ //! See the documentation of [`Params`]. 
pub use sc_network_common::{ - config::ProtocolId, + config::{NetworkConfiguration, ProtocolId}, protocol::role::Role, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, @@ -33,27 +33,9 @@ pub use sc_network_common::{ pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use core::{fmt, iter}; -use libp2p::{ - identity::{ed25519, Keypair}, - multiaddr, Multiaddr, -}; use prometheus_endpoint::Registry; -use sc_network_common::config::{ - MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig, -}; -use sp_runtime::traits::Block as BlockT; -use std::{ - error::Error, - fs, - future::Future, - io::{self, Write}, - net::Ipv4Addr, - path::{Path, PathBuf}, - pin::Pin, - sync::Arc, -}; -use zeroize::Zeroize; +use sc_network_common::config::NonDefaultSetConfig; +use std::{future::Future, pin::Pin, sync::Arc}; /// Network initialization parameters. pub struct Params { @@ -86,350 +68,3 @@ pub struct Params { /// Request response protocol configurations pub request_response_protocol_configs: Vec, } - -/// Sync operation mode. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum SyncMode { - /// Full block download and verification. - Full, - /// Download blocks and the latest state. - Fast { - /// Skip state proof download and verification. - skip_proofs: bool, - /// Download indexed transactions for recent blocks. - storage_chain_mode: bool, - }, - /// Warp sync - verify authority set transitions and the latest state. - Warp, -} - -impl SyncMode { - /// Returns if `self` is [`Self::Warp`]. - pub fn is_warp(&self) -> bool { - matches!(self, Self::Warp) - } - - /// Returns if `self` is [`Self::Fast`]. - pub fn is_fast(&self) -> bool { - matches!(self, Self::Fast { .. }) - } -} - -impl Default for SyncMode { - fn default() -> Self { - Self::Full - } -} - -/// Network service configuration. 
-#[derive(Clone, Debug)] -pub struct NetworkConfiguration { - /// Directory path to store network-specific configuration. None means nothing will be saved. - pub net_config_path: Option, - /// Multiaddresses to listen for incoming connections. - pub listen_addresses: Vec, - /// Multiaddresses to advertise. Detected automatically if empty. - pub public_addresses: Vec, - /// List of initial node addresses - pub boot_nodes: Vec, - /// The node key configuration, which determines the node's network identity keypair. - pub node_key: NodeKeyConfig, - /// List of request-response protocols that the node supports. - pub request_response_protocols: Vec, - /// Configuration for the default set of nodes used for block syncing and transactions. - pub default_peers_set: SetConfig, - /// Number of substreams to reserve for full nodes for block syncing and transactions. - /// Any other slot will be dedicated to light nodes. - /// - /// This value is implicitly capped to `default_set.out_peers + default_set.in_peers`. - pub default_peers_set_num_full: u32, - /// Configuration for extra sets of nodes. - pub extra_sets: Vec, - /// Client identifier. Sent over the wire for debugging purposes. - pub client_version: String, - /// Name of the node. Sent over the wire for debugging purposes. - pub node_name: String, - /// Configuration for the transport layer. - pub transport: TransportConfig, - /// Maximum number of peers to ask the same blocks in parallel. - pub max_parallel_downloads: u32, - /// Initial syncing mode. - pub sync_mode: SyncMode, - - /// True if Kademlia random discovery should be enabled. - /// - /// If true, the node will automatically randomly walk the DHT in order to find new peers. - pub enable_dht_random_walk: bool, - - /// Should we insert non-global addresses into the DHT? - pub allow_non_globals_in_dht: bool, - - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in - /// the presence of potentially adversarial nodes. 
- pub kademlia_disjoint_query_paths: bool, - /// Enable serving block data over IPFS bitswap. - pub ipfs_server: bool, - - /// Size of Yamux receive window of all substreams. `None` for the default (256kiB). - /// Any value less than 256kiB is invalid. - /// - /// # Context - /// - /// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes - /// to be transferred at a time, where `N` is the Yamux receive window size configurable here. - /// This means, in practice, that every `N` bytes must be acknowledged by the receiver before - /// the sender can send more data. The maximum bandwidth of each notifications substream is - /// therefore `N / round_trip_time`. - /// - /// It is recommended to leave this to `None`, and use a request-response protocol instead if - /// a large amount of data must be transferred. The reason why the value is configurable is - /// that some Substrate users mis-use notification protocols to send large amounts of data. - /// As such, this option isn't designed to stay and will likely get removed in the future. - /// - /// Note that configuring a value here isn't a modification of the Yamux protocol, but rather - /// a modification of the way the implementation works. Different nodes with different - /// configured values remain compatible with each other. 
- pub yamux_window_size: Option, -} - -impl NetworkConfiguration { - /// Create new default configuration - pub fn new, SV: Into>( - node_name: SN, - client_version: SV, - node_key: NodeKeyConfig, - net_config_path: Option, - ) -> Self { - let default_peers_set = SetConfig::default(); - Self { - net_config_path, - listen_addresses: Vec::new(), - public_addresses: Vec::new(), - boot_nodes: Vec::new(), - node_key, - request_response_protocols: Vec::new(), - default_peers_set_num_full: default_peers_set.in_peers + default_peers_set.out_peers, - default_peers_set, - extra_sets: Vec::new(), - client_version: client_version.into(), - node_name: node_name.into(), - transport: TransportConfig::Normal { enable_mdns: false, allow_private_ipv4: true }, - max_parallel_downloads: 5, - sync_mode: SyncMode::Full, - enable_dht_random_walk: true, - allow_non_globals_in_dht: false, - kademlia_disjoint_query_paths: false, - yamux_window_size: None, - ipfs_server: false, - } - } - - /// Create new default configuration for localhost-only connection with random port (useful for - /// testing) - pub fn new_local() -> NetworkConfiguration { - let mut config = - NetworkConfiguration::new("test-node", "test-client", Default::default(), None); - - config.listen_addresses = - vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect()]; - - config.allow_non_globals_in_dht = true; - config - } - - /// Create new default configuration for localhost-only connection with random port (useful for - /// testing) - pub fn new_memory() -> NetworkConfiguration { - let mut config = - NetworkConfiguration::new("test-node", "test-client", Default::default(), None); - - config.listen_addresses = - vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect()]; - - config.allow_non_globals_in_dht = true; - config - } -} - -/// The configuration of a node's secret 
key, describing the type of key -/// and how it is obtained. A node's identity keypair is the result of -/// the evaluation of the node key configuration. -#[derive(Clone, Debug)] -pub enum NodeKeyConfig { - /// A Ed25519 secret key configuration. - Ed25519(Secret), -} - -impl Default for NodeKeyConfig { - fn default() -> NodeKeyConfig { - Self::Ed25519(Secret::New) - } -} - -/// The options for obtaining a Ed25519 secret key. -pub type Ed25519Secret = Secret; - -/// The configuration options for obtaining a secret key `K`. -#[derive(Clone)] -pub enum Secret { - /// Use the given secret key `K`. - Input(K), - /// Read the secret key from a file. If the file does not exist, - /// it is created with a newly generated secret key `K`. The format - /// of the file is determined by `K`: - /// - /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. - File(PathBuf), - /// Always generate a new secret key `K`. - New, -} - -impl fmt::Debug for Secret { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Input(_) => f.debug_tuple("Secret::Input").finish(), - Self::File(path) => f.debug_tuple("Secret::File").field(path).finish(), - Self::New => f.debug_tuple("Secret::New").finish(), - } - } -} - -impl NodeKeyConfig { - /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: - /// - /// * If the secret is configured as input, the corresponding keypair is returned. - /// - /// * If the secret is configured as a file, it is read from that file, if it exists. Otherwise - /// a new secret is generated and stored. In either case, the keypair obtained from the - /// secret is returned. - /// - /// * If the secret is configured to be new, it is generated and the corresponding keypair is - /// returned. 
- pub fn into_keypair(self) -> io::Result { - use NodeKeyConfig::*; - match self { - Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), - - Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), - - Ed25519(Secret::File(f)) => get_secret( - f, - |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { - if s.len() == 64 { - array_bytes::hex2bytes(&s).ok() - } else { - None - } - }) { - Some(s) => ed25519::SecretKey::from_bytes(s), - _ => ed25519::SecretKey::from_bytes(&mut b), - }, - ed25519::SecretKey::generate, - |b| b.as_ref().to_vec(), - ) - .map(ed25519::Keypair::from) - .map(Keypair::Ed25519), - } - } -} - -/// Load a secret key from a file, if it exists, or generate a -/// new secret key and write it to that file. In either case, -/// the secret key is returned. -fn get_secret(file: P, parse: F, generate: G, serialize: W) -> io::Result -where - P: AsRef, - F: for<'r> FnOnce(&'r mut [u8]) -> Result, - G: FnOnce() -> K, - E: Error + Send + Sync + 'static, - W: Fn(&K) -> Vec, -{ - std::fs::read(&file) - .and_then(|mut sk_bytes| { - parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) - }) - .or_else(|e| { - if e.kind() == io::ErrorKind::NotFound { - file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; - let sk = generate(); - let mut sk_vec = serialize(&sk); - write_secret_file(file, &sk_vec)?; - sk_vec.zeroize(); - Ok(sk) - } else { - Err(e) - } - }) -} - -/// Write secret bytes to a file. -fn write_secret_file

(path: P, sk_bytes: &[u8]) -> io::Result<()> -where - P: AsRef, -{ - let mut file = open_secret_file(&path)?; - file.write_all(sk_bytes) -} - -/// Opens a file containing a secret key in write mode. -#[cfg(unix)] -fn open_secret_file

(path: P) -> io::Result -where - P: AsRef, -{ - use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path) -} - -/// Opens a file containing a secret key in write mode. -#[cfg(not(unix))] -fn open_secret_file

(path: P) -> Result -where - P: AsRef, -{ - fs::OpenOptions::new().write(true).create_new(true).open(path) -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::TempDir; - - fn tempdir_with_prefix(prefix: &str) -> TempDir { - tempfile::Builder::new().prefix(prefix).tempdir().unwrap() - } - - fn secret_bytes(kp: &Keypair) -> Vec { - let Keypair::Ed25519(p) = kp; - p.secret().as_ref().iter().cloned().collect() - } - - #[test] - fn test_secret_file() { - let tmp = tempdir_with_prefix("x"); - std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated - let file = tmp.path().join("x").to_path_buf(); - let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) - } - - #[test] - fn test_secret_input() { - let sk = ed25519::SecretKey::generate(); - let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); - } - - #[test] - fn test_secret_new() { - let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); - } -} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 86602a5d19e91..fafe410e061cb 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -257,7 +257,6 @@ pub mod network_state; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -use sc_consensus::{JustificationSyncLink, Link}; pub use sc_network_common::{ protocol::{ event::{DhtEvent, Event}, @@ -279,8 +278,6 @@ pub use service::{ DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, }; -use 
sp_consensus::SyncOracle; -use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sc_peerset::ReputationChange; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 7248dfeae5c24..6f513ca44460a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -41,7 +41,7 @@ use sc_network_common::{ use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ - collections::{HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, io, iter, task::Poll, }; @@ -85,7 +85,7 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, - peers: HashSet, + peers: HashMap, _marker: std::marker::PhantomData, } @@ -188,7 +188,7 @@ where .collect(), bad_handshake_substreams: Default::default(), _marker: Default::default(), - peers: HashSet::new(), + peers: HashMap::new(), }; Ok((protocol, peerset_handle, known_addresses)) @@ -220,7 +220,6 @@ where self.behaviour.peerset_debug_info() } - // TODO(aaro): implement using behaviour? /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { self.peers.len() @@ -356,16 +355,6 @@ pub enum CustomMessageOutcome { /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. negotiated_fallback: Option, roles: Roles, - notifications_sink: NotificationsSink, - }, - /// Notification protocols have been opened with a remote. - /// - /// Protocol must validate the received handshake and close the substream if it is invalid. - UncheckedNotificationStreamOpened { - remote: PeerId, - protocol: ProtocolName, - /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. 
- negotiated_fallback: Option, received_handshake: Vec, notifications_sink: NotificationsSink, }, @@ -488,7 +477,6 @@ where notifications_sink, negotiated_fallback, } => { - self.peers.insert(peer_id); // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { // `received_handshake` can be either a `Status` message if received from the @@ -496,18 +484,21 @@ where // announces substream. match as DecodeAll>::decode_all(&mut &received_handshake[..]) { Ok(GenericMessage::Status(handshake)) => { + let roles = handshake.roles; let handshake = BlockAnnouncesHandshake:: { roles: handshake.roles, best_number: handshake.best_number, best_hash: handshake.best_hash, genesis_hash: handshake.genesis_hash, }; + self.peers.insert(peer_id, roles); - CustomMessageOutcome::UncheckedNotificationStreamOpened { + CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, received_handshake: handshake.encode(), + roles, notifications_sink, } }, @@ -519,40 +510,66 @@ where msg, ); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - self.peers.remove(&peer_id); CustomMessageOutcome::None }, - Err(_err) => CustomMessageOutcome::UncheckedNotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - negotiated_fallback, - received_handshake, - notifications_sink, + Err(_err) => { + match as DecodeAll>::decode_all( + &mut &received_handshake[..], + ) { + Ok(handshake) => { + let roles = handshake.roles; + self.peers.insert(peer_id, roles); + + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)] + .clone(), + negotiated_fallback, + received_handshake, + roles, + notifications_sink, + } + }, + Err(err) => { + log::debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {}", + peer_id, + 
received_handshake, + err, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + }, + } }, } } else { - // TODO(aaro): fix this - match (Roles::decode_all(&mut &received_handshake[..]), None::) { + match ( + Roles::decode_all(&mut &received_handshake[..]), + self.peers.get(&peer_id), + ) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, roles, + received_handshake, notifications_sink, }, - (Err(_), Some(_peer)) if received_handshake.is_empty() => { - panic!("not supported anymore"); + (Err(_), Some(roles)) if received_handshake.is_empty() => { // As a convenience, we allow opening substreams for "external" // notification protocols with an empty handshake. This fetches the // roles from the locally-known roles. // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 - // CustomMessageOutcome::NotificationStreamOpened { - // remote: peer_id, - // protocol: self.notification_protocols[usize::from(set_id)].clone(), - // negotiated_fallback, - // roles: peer.info.roles, - // notifications_sink, - // } + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id)].clone(), + negotiated_fallback, + roles: *roles, + received_handshake, + notifications_sink, + } }, (Err(err), _) => { debug!(target: "sync", "Failed to parse remote handshake: {}", err); @@ -575,7 +592,6 @@ where notifications_sink, } }, - // TODO(aaro): listen on event stream in `SyncingEngine` NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { // The substream that has just been closed had been opened with a bad diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 710d71c38aeb9..d170d2b322fef 100644 --- a/client/network/src/service.rs +++ 
b/client/network/src/service.rs @@ -65,7 +65,7 @@ use sc_network_common::{ request_responses::{IfDisconnected, RequestFailure}, service::{ NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, @@ -74,7 +74,7 @@ use sc_network_common::{ use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sp_runtime::traits::{Block as BlockT, Zero}; use std::{ cmp, collections::{HashMap, HashSet}, @@ -98,7 +98,7 @@ mod out_events; mod tests; pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; -use sc_network_common::service::{NetworkBlock, NetworkRequest}; +use sc_network_common::service::NetworkRequest; /// Substrate network service. Handles network IO and manages connectivity. 
pub struct NetworkService { @@ -1426,6 +1426,7 @@ where negotiated_fallback, notifications_sink, role, + received_handshake, })) => { if let Some(metrics) = this.metrics.as_ref() { metrics @@ -1444,33 +1445,6 @@ where protocol, negotiated_fallback, role, - }); - }, - Poll::Ready(SwarmEvent::Behaviour( - BehaviourOut::UncheckedNotificationStreamOpened { - remote, - protocol, - negotiated_fallback, - notifications_sink, - received_handshake, - }, - )) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics - .notifications_streams_opened_total - .with_label_values(&[&protocol]) - .inc(); - } - { - let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks - .insert((remote, protocol.clone()), notifications_sink); - debug_assert!(_previous_value.is_none()); - } - this.event_streams.send(Event::UncheckedNotificationStreamOpened { - remote, - protocol, - negotiated_fallback, received_handshake, }); }, diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 848134e7a89b8..c8f3e1b1a7a3f 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -244,13 +244,6 @@ impl Metrics { .inc_by(num); }); }, - Event::UncheckedNotificationStreamOpened { protocol, .. } => { - format_label("notif-open-", protocol, |protocol_label| { - self.events_total - .with_label_values(&[protocol_label, "sent", name]) - .inc_by(num); - }); - }, Event::NotificationStreamClosed { protocol, .. } => { format_label("notif-closed-", protocol, |protocol_label| { self.events_total @@ -282,11 +275,6 @@ impl Metrics { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); }); }, - Event::UncheckedNotificationStreamOpened { protocol, .. 
} => { - format_label("notif-open-", protocol, |protocol_label| { - self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); - }); - }, Event::NotificationStreamClosed { protocol, .. } => { format_label("notif-closed-", protocol, |protocol_label| { self.events_total.with_label_values(&[protocol_label, "received", name]).inc(); diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index be8c0d622478b..2a9edc3fdf25d 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -22,7 +22,9 @@ use futures::prelude::*; use libp2p::Multiaddr; use sc_consensus::{ImportQueue, Link}; use sc_network_common::{ - config::{NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, + config::{ + NonDefaultSetConfig, ProtocolId, SetConfig, SyncMode as SyncOperationMod, TransportConfig, + }, protocol::{event::Event, role::Roles}, service::NetworkEventStream, sync::ChainSync as ChainSyncT, @@ -60,14 +62,6 @@ impl TestNetwork { Self { network } } - pub fn service(&self) -> &Arc { - &self.network.service() - } - - pub fn network(&mut self) -> &mut TestNetworkWorker { - &mut self.network - } - pub fn start_network( self, ) -> (Arc, (impl Stream + std::marker::Unpin)) { @@ -90,7 +84,6 @@ struct TestNetworkBuilder { client: Option>, listen_addresses: Vec, set_config: Option, - chain_sync: Option<(Box>, Box>)>, chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>, config: Option, } @@ -103,17 +96,11 @@ impl TestNetworkBuilder { client: None, listen_addresses: Vec::new(), set_config: None, - chain_sync: None, chain_sync_network: None, config: None, } } - pub fn with_client(mut self, client: Arc) -> Self { - self.client = Some(client); - self - } - pub fn with_config(mut self, config: config::NetworkConfiguration) -> Self { self.config = Some(config); self @@ -129,27 +116,6 @@ impl TestNetworkBuilder { self } - pub fn _with_chain_sync( - mut self, - chain_sync: 
(Box>, Box>), - ) -> Self { - self.chain_sync = Some(chain_sync); - self - } - - pub fn _with_chain_sync_network( - mut self, - chain_sync_network: (NetworkServiceProvider, NetworkServiceHandle), - ) -> Self { - self.chain_sync_network = Some(chain_sync_network); - self - } - - pub fn _with_import_queue(mut self, import_queue: Box>) -> Self { - self.import_queue = Some(import_queue); - self - } - pub fn build(mut self) -> TestNetwork { let client = self.client.as_mut().map_or( Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), @@ -245,31 +211,16 @@ impl TestNetworkBuilder { Roles::from(&config::Role::Full), client.clone(), None, - match network_config.sync_mode { - config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode, - }, - config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, + &network_config, protocol_id.clone(), &None, Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), - network_config.max_parallel_downloads, None, chain_sync_network_handle, import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), None, - std::num::NonZeroUsize::new(16).unwrap(), - HashSet::new(), - HashSet::new(), - HashSet::new(), - 0usize, - 0usize, ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index dd79eaf365df0..75ebe3b8ce752 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -208,85 +208,85 @@ fn notifications_state_consistent() { future::Either::Left(Event::Dht(_)) => {}, future::Either::Right(Event::Dht(_)) => {}, - future::Either::Left(Event::UncheckedNotificationStreamOpened { .. 
}) => {}, - future::Either::Right(Event::UncheckedNotificationStreamOpened { .. }) => {}, + future::Either::Left(Event::NotificationStreamOpened { .. }) => {}, + future::Either::Right(Event::NotificationStreamOpened { .. }) => {}, }; } }); } -#[async_std::test] -async fn lots_of_incoming_peers_works() { - sp_tracing::try_init_simple(); - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (main_node, _) = TestNetworkBuilder::new() - .with_listen_addresses(vec![listen_addr.clone()]) - .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) - .build() - .start_network(); - - let main_node_peer_id = main_node.local_peer_id(); - - // We spawn background tasks and push them in this `Vec`. They will all be waited upon before - // this test ends. - let mut background_tasks_to_wait = Vec::new(); - - for _ in 0..32 { - let (_dialing_node, event_stream) = TestNetworkBuilder::new() - .with_set_config(SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id, - }], - ..Default::default() - }) - .build() - .start_network(); - - background_tasks_to_wait.push(async_std::task::spawn(async move { - // Create a dummy timer that will "never" fire, and that will be overwritten when we - // actually need the timer. Using an Option would be technically cleaner, but it would - // make the code below way more complicated. - let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); - - let mut event_stream = event_stream.fuse(); - let mut sync_protocol_name = None; - loop { - futures::select! { - _ = timer => { - // Test succeeds when timer fires. - return; - } - ev = event_stream.next() => { - match ev.unwrap() { - Event::UncheckedNotificationStreamOpened { protocol, .. } => { - if let None = sync_protocol_name { - sync_protocol_name = Some(protocol.clone()); - } - } - Event::NotificationStreamOpened { remote, .. 
} => { - assert_eq!(remote, main_node_peer_id); - // Test succeeds after 5 seconds. This timer is here in order to - // detect a potential problem after opening. - timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); - } - Event::NotificationStreamClosed { protocol, .. } => { - if Some(protocol) != sync_protocol_name { - // Test failed. - panic!(); - } - } - _ => {} - } - } - } - } - })); - } - - future::join_all(background_tasks_to_wait).await; -} +// #[async_std::test] +// async fn lots_of_incoming_peers_works() { +// sp_tracing::try_init_simple(); +// let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + +// let (main_node, _) = TestNetworkBuilder::new() +// .with_listen_addresses(vec![listen_addr.clone()]) +// .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) +// .build() +// .start_network(); + +// let main_node_peer_id = main_node.local_peer_id(); + +// // We spawn background tasks and push them in this `Vec`. They will all be waited upon before +// // this test ends. +// let mut background_tasks_to_wait = Vec::new(); + +// for _ in 0..32 { +// let (_dialing_node, event_stream) = TestNetworkBuilder::new() +// .with_set_config(SetConfig { +// reserved_nodes: vec![MultiaddrWithPeerId { +// multiaddr: listen_addr.clone(), +// peer_id: main_node_peer_id, +// }], +// ..Default::default() +// }) +// .build() +// .start_network(); + +// background_tasks_to_wait.push(async_std::task::spawn(async move { +// // Create a dummy timer that will "never" fire, and that will be overwritten when we +// // actually need the timer. Using an Option would be technically cleaner, but it would +// // make the code below way more complicated. +// let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); + +// let mut event_stream = event_stream.fuse(); +// let mut sync_protocol_name = None; +// loop { +// futures::select! { +// _ = timer => { +// // Test succeeds when timer fires. 
+// return; +// } +// ev = event_stream.next() => { +// match ev.unwrap() { +// Event::NotificationStreamOpened { protocol, .. } => { +// if let None = sync_protocol_name { +// sync_protocol_name = Some(protocol.clone()); +// } +// } +// Event::NotificationStreamOpened { remote, .. } => { +// assert_eq!(remote, main_node_peer_id); +// // Test succeeds after 5 seconds. This timer is here in order to +// // detect a potential problem after opening. +// timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); +// } +// Event::NotificationStreamClosed { protocol, .. } => { +// if Some(protocol) != sync_protocol_name { +// // Test failed. +// panic!(); +// } +// } +// _ => {} +// } +// } +// } +// } +// })); +// } + +// future::join_all(background_tasks_to_wait).await; +// } #[test] fn notifications_back_pressure() { @@ -304,12 +304,13 @@ fn notifications_back_pressure() { while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { - Event::UncheckedNotificationStreamOpened { protocol, .. } => + Event::NotificationStreamOpened { protocol, .. } => if let None = sync_protocol_name { sync_protocol_name = Some(protocol); }, Event::NotificationStreamClosed { protocol, .. 
} => { - if Some(protocol) != sync_protocol_name { + if Some(&protocol) != sync_protocol_name.as_ref() { + println!("{protocol:?} vs {sync_protocol_name:?}"); panic!() } }, diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 98f906935c974..b87d754d7af1c 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -32,7 +32,9 @@ use codec::{Decode, DecodeAll, Encode}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network_common::{ - config::{NonDefaultSetConfig, ProtocolId}, + config::{ + NetworkConfiguration, NonDefaultSetConfig, ProtocolId, SyncMode as SyncOperationMode, + }, protocol::{event::Event, role::Roles, ProtocolName}, sync::{ message::{ @@ -167,11 +169,10 @@ pub struct Peer { pub known_blocks: LruHashSet, } -// TODO(aaro): reorder these properly and remove stuff that is not needed pub struct SyncingEngine { /// State machine that handles the list of in-progress requests. Only full node peers are /// registered. - pub chain_sync: Box>, + chain_sync: Box>, /// Blockchain client. client: Arc, @@ -201,17 +202,17 @@ pub struct SyncingEngine { tick_timeout: Pin + Send>>, /// All connected peers. Contains both full and light node peers. - pub peers: HashMap>, + peers: HashMap>, /// List of nodes for which we perform additional logging because they are important for the /// user. - pub important_peers: HashSet, + important_peers: HashSet, /// Actual list of connected no-slot nodes. - pub default_peers_set_no_slot_connected_peers: HashSet, + default_peers_set_no_slot_connected_peers: HashSet, /// List of nodes that should never occupy peer slots. - pub default_peers_set_no_slot_peers: HashSet, + default_peers_set_no_slot_peers: HashSet, /// Value that was passed as part of the configuration. Used to cap the number of full /// nodes. 
@@ -221,10 +222,10 @@ pub struct SyncingEngine { default_peers_set_num_light: usize, /// A cache for the data that was associated to a block announcement. - pub block_announce_data_cache: LruCache>, + block_announce_data_cache: LruCache>, /// The `PeerId`'s of all boot nodes. - pub boot_node_ids: HashSet, + boot_node_ids: HashSet, /// Protocol name used for block announcements block_announce_protocol_name: ProtocolName, @@ -244,29 +245,74 @@ where + Sync + 'static, { - // TODO(aaro): clean up these parameters pub fn new( roles: Roles, client: Arc, metrics_registry: Option<&Registry>, - mode: SyncMode, + network_config: &NetworkConfiguration, protocol_id: ProtocolId, fork_id: &Option, block_announce_validator: Box + Send>, - max_parallel_downloads: u32, warp_sync_provider: Option>>, network_service: service::network::NetworkServiceHandle, import_queue: Box>, block_request_protocol_name: ProtocolName, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - cache_capacity: NonZeroUsize, - important_peers: HashSet, - boot_node_ids: HashSet, - default_peers_set_no_slot_peers: HashSet, - default_peers_set_num_full: usize, - default_peers_set_num_light: usize, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { + let mode = match network_config.sync_mode { + SyncOperationMode::Full => SyncMode::Full, + SyncOperationMode::Fast { skip_proofs, storage_chain_mode } => + SyncMode::LightState { skip_proofs, storage_chain_mode }, + SyncOperationMode::Warp => SyncMode::Warp, + }; + let max_parallel_downloads = network_config.max_parallel_downloads; + let cache_capacity = NonZeroUsize::new( + (network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize) + .max(1), + ) + .expect("cache capacity is not zero"); + let important_peers = { + let mut imp_p = HashSet::new(); + for reserved in &network_config.default_peers_set.reserved_nodes { + imp_p.insert(reserved.peer_id); + } + for reserved in 
network_config + .extra_sets + .iter() + .flat_map(|s| s.set_config.reserved_nodes.iter()) + { + imp_p.insert(reserved.peer_id); + } + imp_p.shrink_to_fit(); + imp_p + }; + let boot_node_ids = { + let mut list = HashSet::new(); + for node in &network_config.boot_nodes { + list.insert(node.peer_id); + } + list.shrink_to_fit(); + list + }; + let default_peers_set_no_slot_peers = { + let mut no_slot_p: HashSet = network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|reserved| reserved.peer_id) + .collect(); + no_slot_p.shrink_to_fit(); + no_slot_p + }; + let default_peers_set_num_full = network_config.default_peers_set_num_full as usize; + let default_peers_set_num_light = { + let total = network_config.default_peers_set.out_peers + + network_config.default_peers_set.in_peers; + total.saturating_sub(network_config.default_peers_set_num_full) as usize + }; + let (chain_sync, block_announce_config) = ChainSync::new( mode, client.clone(), @@ -371,7 +417,6 @@ where } } - // TODO(aaro): emit peernewbest event? /// Process the result of the block announce validation. pub fn process_block_announce_validation_result( &mut self, @@ -557,11 +602,8 @@ where while let Poll::Ready(Some(event)) = event_stream.poll_next_unpin(cx) { match event { - Event::UncheckedNotificationStreamOpened { - remote, - protocol, - received_handshake, - .. + Event::NotificationStreamOpened { + remote, protocol, received_handshake, .. 
} => { if protocol != self.block_announce_protocol_name { continue diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index df403866aa5ea..d701681128e93 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,9 +23,8 @@ mod block_import; mod sync; use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, marker::PhantomData, - num::NonZeroUsize, pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, @@ -48,13 +47,11 @@ use sc_consensus::{ ForkChoiceStrategy, ImportQueue, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, Verifier, }; -use sc_network::{ - config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, - Multiaddr, NetworkService, NetworkWorker, -}; +use sc_network::{Multiaddr, NetworkService, NetworkWorker}; use sc_network_common::{ config::{ - MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, + MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, + ProtocolId, RequestResponseConfig, Role, SyncMode, TransportConfig, }, protocol::{role::Roles, ProtocolName}, service::{NetworkBlock, NetworkEventStream, NetworkStateInfo, NetworkSyncForkRequest}, @@ -885,65 +882,16 @@ where Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), client.clone(), None, - match network_config.sync_mode { - SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode, - }, - SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, + &network_config, protocol_id.clone(), &fork_id, block_announce_validator, - network_config.max_parallel_downloads, Some(warp_sync), chain_sync_network_handle, import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), 
Some(warp_protocol_config.name.clone()), - NonZeroUsize::new(16).unwrap(), - { - let mut imp_p = HashSet::new(); - for reserved in &network_config.default_peers_set.reserved_nodes { - imp_p.insert(reserved.peer_id); - } - for reserved in network_config - .extra_sets - .iter() - .flat_map(|s| s.set_config.reserved_nodes.iter()) - { - imp_p.insert(reserved.peer_id); - } - imp_p.shrink_to_fit(); - imp_p - }, - { - let mut list = HashSet::new(); - for node in &network_config.boot_nodes { - list.insert(node.peer_id); - } - list.shrink_to_fit(); - list - }, - { - let mut no_slot_p: HashSet = network_config - .default_peers_set - .reserved_nodes - .iter() - .map(|reserved| reserved.peer_id) - .collect(); - no_slot_p.shrink_to_fit(); - no_slot_p - }, - network_config.default_peers_set_num_full as usize, - { - let total = network_config.default_peers_set.out_peers + - network_config.default_peers_set.in_peers; - total.saturating_sub(network_config.default_peers_set_num_full) as usize - }, ) .unwrap(); let sync_service_import_queue = Box::new(sync_service.clone()); diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index 21c47009b7246..a0e069420ab6c 100644 --- a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -340,7 +340,7 @@ where async fn handle_network_event(&mut self, event: Event) { match event { - Event::Dht(_) | Event::UncheckedNotificationStreamOpened { .. } => {}, + Event::Dht(_) => {}, Event::NotificationStreamOpened { remote, protocol, role, .. 
} if protocol == self.protocol_name => { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index adf3185432574..b854f8dfbe42f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -22,7 +22,7 @@ use crate::{ config::{Configuration, KeystoreConfig, PrometheusConfig}, error::Error, metrics::MetricsService, - start_rpc_servers, PeerId, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, + start_rpc_servers, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; use jsonrpsee::RpcModule; @@ -37,9 +37,10 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{config::SyncMode, NetworkService}; +use sc_network::NetworkService; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ + config::SyncMode, protocol::role::Roles, service::{NetworkEventStream, NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, @@ -74,7 +75,7 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}, BuildStorage, }; -use std::{collections::HashSet, num::NonZeroUsize, str::FromStr, sync::Arc, time::SystemTime}; +use std::{str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. 
pub type TFullClient = @@ -850,77 +851,22 @@ where protocol_config }; - // TODO(aaro): expose `config.network` through common crate let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (engine, sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), client.clone(), config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), - match config.network.sync_mode { - SyncMode::Full => sc_network_common::sync::SyncMode::Full, - SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, - SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, + &config.network, protocol_id.clone(), &config.chain_spec.fork_id().map(ToOwned::to_owned), block_announce_validator, - config.network.max_parallel_downloads, warp_sync_provider, chain_sync_network_handle, import_queue.service(), block_request_protocol_config.name.clone(), state_request_protocol_config.name.clone(), warp_sync_protocol_config.as_ref().map(|config| config.name.clone()), - NonZeroUsize::new( - (config.network.default_peers_set.in_peers as usize + - config.network.default_peers_set.out_peers as usize) - .max(1), - ) - .expect("cache capacity is not zero"), - { - let mut imp_p = HashSet::new(); - for reserved in &config.network.default_peers_set.reserved_nodes { - imp_p.insert(reserved.peer_id); - } - for reserved in config - .network - .extra_sets - .iter() - .flat_map(|s| s.set_config.reserved_nodes.iter()) - { - imp_p.insert(reserved.peer_id); - } - imp_p.shrink_to_fit(); - imp_p - }, - { - let mut list = HashSet::new(); - for node in &config.network.boot_nodes { - list.insert(node.peer_id); - } - list.shrink_to_fit(); - list - }, - { - let mut no_slot_p: HashSet = config - .network - .default_peers_set - .reserved_nodes - .iter() - .map(|reserved| reserved.peer_id) - .collect(); - no_slot_p.shrink_to_fit(); - no_slot_p - }, - 
config.network.default_peers_set_num_full as usize, - { - let total = config.network.default_peers_set.out_peers + - config.network.default_peers_set.in_peers; - total.saturating_sub(config.network.default_peers_set_num_full) as usize - }, )?; - let sync_service_import_queue = sync_service.clone(); let sync_service = Arc::new(sync_service); diff --git a/client/service/src/config.rs b/client/service/src/config.rs index e79ff48d6f0ff..87e820583de06 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -22,11 +22,14 @@ pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStra pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; pub use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; pub use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, Role}, + config::{NetworkConfiguration, Role}, Multiaddr, }; pub use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, + config::{ + MultiaddrWithPeerId, NodeKeyConfig, NonDefaultSetConfig, ProtocolId, SetConfig, + TransportConfig, + }, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, From 00852203a23ef8eee2c23068adec02e78b60f7b5 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Fri, 2 Dec 2022 10:12:38 +0200 Subject: [PATCH 20/30] Refactor code --- client/beefy/src/worker.rs | 36 +---- .../src/communication/tests.rs | 2 +- client/network-gossip/src/bridge.rs | 1 - client/network/src/protocol.rs | 19 +-- client/network/src/service.rs | 37 +---- client/network/src/service/tests/mod.rs | 9 +- client/network/src/service/tests/service.rs | 148 +++++++++--------- client/network/sync/src/engine.rs | 7 +- client/network/sync/src/lib.rs | 2 - client/network/sync/src/tests.rs | 82 ---------- 10 files changed, 87 insertions(+), 256 deletions(-) delete mode 100644 client/network/sync/src/tests.rs diff --git 
a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 200823c0326ae..a13bd72d792d1 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -38,7 +38,6 @@ use codec::{Codec, Decode, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_network_common::service::{NetworkEventStream, NetworkRequest}; use sc_network_gossip::GossipEngine; use sc_utils::notification::NotificationReceiver; use sp_api::BlockId; @@ -819,21 +818,6 @@ where ); loop { - // Don't bother voting or requesting justifications during major sync. - if !self.sync.is_major_syncing() { - // If the current target is a mandatory block, - // make sure there's also an on-demand justification request out for it. - if let Some((block, active)) = self.voting_oracle().mandatory_pending() { - // This only starts new request if there isn't already an active one. - self.on_demand_justifications.request(block, active); - } - // There were external events, 'state' is changed, author a vote if needed/possible. - if let Err(err) = self.try_to_vote() { - debug!(target: "beefy", "🥩 {}", err); - } - } else { - debug!(target: "beefy", "🥩 Skipping voting while major syncing."); - } // Act on changed 'state'. self.process_new_state(); @@ -890,11 +874,6 @@ where } }, } - - // Handle pending justifications and/or votes for now GRANDPA finalized blocks. 
- if let Err(err) = self.try_pending_justif_and_votes() { - debug!(target: "beefy", "🥩 {}", err); - } } } } @@ -976,7 +955,7 @@ pub(crate) mod tests { use sp_blockchain::Backend as BlockchainBackendT; use sp_runtime::traits::{One, Zero}; use substrate_test_runtime_client::{ - runtime::{Block, Digest, DigestItem, Header, H256}, + runtime::{Block, Digest, DigestItem, Header}, Backend, }; @@ -1009,18 +988,7 @@ pub(crate) mod tests { key: &Keyring, min_block_delta: u32, genesis_validator_set: ValidatorSet, - ) -> BeefyWorker< - Block, - Backend, - MmrRootProvider, - Arc>, - // ||||||| 7a76b40dc6 - // TestApi, - // Arc>, - // ======= - // Arc>, - // >>>>>>> import-queue-refactoring - > { + ) -> BeefyWorker, Arc>> { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index ee679eec85d97..8244c135612c6 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -204,7 +204,7 @@ impl SyncEventStream for TestSync { } impl NetworkBlock> for TestSync { - fn announce_block(&self, hash: Hash, _data: Option>) { + fn announce_block(&self, _hash: Hash, _data: Option>) { todo!(); } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index b294c5ccc1539..c28b6d67f8104 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -739,7 +739,6 @@ mod tests { } let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); - let mut _syncevent_sender = sync.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. 
event_sender diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 6f513ca44460a..6300cd0b7095a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -31,14 +31,13 @@ use libp2p::{ use log::{debug, error, warn}; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; -use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; +use sc_client_api::HeaderBackend; use sc_network_common::{ config::NonReservedPeerMode, error, protocol::{role::Roles, ProtocolName}, sync::message::BlockAnnouncesHandshake, }; -use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -92,13 +91,7 @@ pub struct Protocol { impl Protocol where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Create a new instance. 
pub fn new( @@ -377,13 +370,7 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { type ConnectionHandler = ::ConnectionHandler; type OutEvent = CustomMessageOutcome; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d170d2b322fef..3a74b99489046 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -54,7 +54,6 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::{MultiaddrWithPeerId, TransportConfig}, error::Error, @@ -73,7 +72,7 @@ use sc_network_common::{ }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block as BlockT, Zero}; use std::{ cmp, @@ -134,13 +133,7 @@ impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Creates the network service. /// @@ -1122,13 +1115,7 @@ pub struct NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. 
external_addresses: Arc>>, @@ -1160,13 +1147,7 @@ impl Future for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { type Output = (); @@ -1732,7 +1713,7 @@ where }; } - // // Update the variables shared with the `NetworkService`. + // Update the variables shared with the `NetworkService`. let num_connected_peers = this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); this.num_connected.store(num_connected_peers, Ordering::Relaxed); @@ -1780,13 +1761,7 @@ impl Unpin for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { } diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index 2a9edc3fdf25d..121cffc93ce08 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -22,12 +22,9 @@ use futures::prelude::*; use libp2p::Multiaddr; use sc_consensus::{ImportQueue, Link}; use sc_network_common::{ - config::{ - NonDefaultSetConfig, ProtocolId, SetConfig, SyncMode as SyncOperationMod, TransportConfig, - }, + config::{NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, protocol::{event::Event, role::Roles}, service::NetworkEventStream, - sync::ChainSync as ChainSyncT, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -35,10 +32,9 @@ use sc_network_sync::{ engine::SyncingEngine, service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, - SyncingService, }; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; use substrate_test_runtime_client::{ runtime::{Block as TestBlock, Hash as 
TestHash}, TestClient, TestClientBuilder, TestClientBuilderExt as _, @@ -50,7 +46,6 @@ mod service; type TestNetworkWorker = NetworkWorker; type TestNetworkService = NetworkService; -const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; const PROTOCOL_NAME: &str = "/foo"; struct TestNetwork { diff --git a/client/network/src/service/tests/service.rs b/client/network/src/service/tests/service.rs index 75ebe3b8ce752..19199831936d7 100644 --- a/client/network/src/service/tests/service.rs +++ b/client/network/src/service/tests/service.rs @@ -32,7 +32,6 @@ type TestNetworkService = NetworkService< substrate_test_runtime_client::runtime::Hash, >; -const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; const PROTOCOL_NAME: &str = "/foo"; /// Builds two nodes and their associated events stream. @@ -207,86 +206,82 @@ fn notifications_state_consistent() { // Add new events here. future::Either::Left(Event::Dht(_)) => {}, future::Either::Right(Event::Dht(_)) => {}, - - future::Either::Left(Event::NotificationStreamOpened { .. }) => {}, - future::Either::Right(Event::NotificationStreamOpened { .. }) => {}, }; } }); } -// #[async_std::test] -// async fn lots_of_incoming_peers_works() { -// sp_tracing::try_init_simple(); -// let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - -// let (main_node, _) = TestNetworkBuilder::new() -// .with_listen_addresses(vec![listen_addr.clone()]) -// .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) -// .build() -// .start_network(); - -// let main_node_peer_id = main_node.local_peer_id(); - -// // We spawn background tasks and push them in this `Vec`. They will all be waited upon before -// // this test ends. 
-// let mut background_tasks_to_wait = Vec::new(); - -// for _ in 0..32 { -// let (_dialing_node, event_stream) = TestNetworkBuilder::new() -// .with_set_config(SetConfig { -// reserved_nodes: vec![MultiaddrWithPeerId { -// multiaddr: listen_addr.clone(), -// peer_id: main_node_peer_id, -// }], -// ..Default::default() -// }) -// .build() -// .start_network(); - -// background_tasks_to_wait.push(async_std::task::spawn(async move { -// // Create a dummy timer that will "never" fire, and that will be overwritten when we -// // actually need the timer. Using an Option would be technically cleaner, but it would -// // make the code below way more complicated. -// let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); - -// let mut event_stream = event_stream.fuse(); -// let mut sync_protocol_name = None; -// loop { -// futures::select! { -// _ = timer => { -// // Test succeeds when timer fires. -// return; -// } -// ev = event_stream.next() => { -// match ev.unwrap() { -// Event::NotificationStreamOpened { protocol, .. } => { -// if let None = sync_protocol_name { -// sync_protocol_name = Some(protocol.clone()); -// } -// } -// Event::NotificationStreamOpened { remote, .. } => { -// assert_eq!(remote, main_node_peer_id); -// // Test succeeds after 5 seconds. This timer is here in order to -// // detect a potential problem after opening. -// timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); -// } -// Event::NotificationStreamClosed { protocol, .. } => { -// if Some(protocol) != sync_protocol_name { -// // Test failed. 
-// panic!(); -// } -// } -// _ => {} -// } -// } -// } -// } -// })); -// } - -// future::join_all(background_tasks_to_wait).await; -// } +#[async_std::test] +async fn lots_of_incoming_peers_works() { + sp_tracing::try_init_simple(); + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (main_node, _) = TestNetworkBuilder::new() + .with_listen_addresses(vec![listen_addr.clone()]) + .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) + .build() + .start_network(); + + let main_node_peer_id = main_node.local_peer_id(); + + // We spawn background tasks and push them in this `Vec`. They will all be waited upon before + // this test ends. + let mut background_tasks_to_wait = Vec::new(); + + for _ in 0..32 { + let (_dialing_node, event_stream) = TestNetworkBuilder::new() + .with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id, + }], + ..Default::default() + }) + .build() + .start_network(); + + background_tasks_to_wait.push(async_std::task::spawn(async move { + // Create a dummy timer that will "never" fire, and that will be overwritten when we + // actually need the timer. Using an Option would be technically cleaner, but it would + // make the code below way more complicated. + let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); + + let mut event_stream = event_stream.fuse(); + let mut sync_protocol_name = None; + loop { + futures::select! { + _ = timer => { + // Test succeeds when timer fires. + return; + } + ev = event_stream.next() => { + match ev.unwrap() { + Event::NotificationStreamOpened { protocol, remote, .. } => { + if let None = sync_protocol_name { + sync_protocol_name = Some(protocol.clone()); + } + + assert_eq!(remote, main_node_peer_id); + // Test succeeds after 5 seconds. This timer is here in order to + // detect a potential problem after opening. 
+ timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); + } + Event::NotificationStreamClosed { protocol, .. } => { + if Some(protocol) != sync_protocol_name { + // Test failed. + panic!(); + } + } + _ => {} + } + } + } + } + })); + } + + future::join_all(background_tasks_to_wait).await; +} #[test] fn notifications_back_pressure() { @@ -310,7 +305,6 @@ fn notifications_back_pressure() { }, Event::NotificationStreamClosed { protocol, .. } => { if Some(&protocol) != sync_protocol_name.as_ref() { - println!("{protocol:?} vs {sync_protocol_name:?}"); panic!() } }, diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index b87d754d7af1c..082908dc7aa95 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -794,7 +794,6 @@ where } } - // TODO: peernewbest /// Called on the first connection between two peers on the default set, after their exchange /// of handshake. /// @@ -923,10 +922,8 @@ where self.chain_sync.send_block_request(who, req); } - self.event_streams.retain(|stream| { - println!("sync: {who:?} connected"); - stream.unbounded_send(SyncEvent::PeerConnected(who)).is_ok() - }); + self.event_streams + .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(who)).is_ok()); Ok(()) } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 80ad00ddc712e..4daa500b91130 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -36,8 +36,6 @@ mod schema; pub mod service; pub mod state; pub mod state_request_handler; -#[cfg(test)] -mod tests; pub mod warp; pub mod warp_request_handler; diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs deleted file mode 100644 index 9b71163771813..0000000000000 --- a/client/network/sync/src/tests.rs +++ /dev/null @@ -1,82 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -/* -use crate::{service::network::NetworkServiceProvider, ChainSync, ForkTarget}; - -use libp2p::PeerId; - -use sc_network_common::{ - config::ProtocolId, - protocol::{ - role::{Role, Roles}, - ProtocolName, - }, - service::NetworkSyncForkRequest, - sync::ChainSync as ChainSyncT, -}; -use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_core::H256; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; - -use std::{sync::Arc, task::Poll}; - -// verify that the fork target map is empty, then submit a new sync fork request, -// poll `ChainSync` and verify that a new sync fork request has been registered -#[async_std::test] -async fn delegate_to_chainsync() { - let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); - let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut chain_sync, chain_sync_service, _) = ChainSync::new( - sc_network_common::sync::SyncMode::Full, - Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), - Box::new(DefaultBlockAnnounceValidator), - 1u32, - None, - None, - 
chain_sync_network_handle, - import_queue, - ProtocolName::from("block-request"), - ProtocolName::from("state-request"), - None, - ) - .unwrap(); - - let hash = H256::random(); - let in_number = 1337u64; - let peers = (0..3).map(|_| PeerId::random()).collect::>(); - - assert!(chain_sync.fork_targets.is_empty()); - chain_sync_service.set_sync_fork_request(peers, hash, in_number); - - futures::future::poll_fn(|cx| { - let _ = chain_sync.poll(cx); - Poll::Ready(()) - }) - .await; - - if let Some(ForkTarget { number, .. }) = chain_sync.fork_targets.get(&hash) { - assert_eq!(number, &in_number); - } else { - panic!("expected to contain `ForkTarget`"); - } -} -*/ From a55a44d826a57b55e7a3fde2803fcd7cafee6c3a Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Fri, 30 Dec 2022 15:48:16 +0200 Subject: [PATCH 21/30] Update client/finality-grandpa/src/communication/tests.rs Co-authored-by: Anton --- client/finality-grandpa/src/communication/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 15946f2fcff08..d20a62cb1e475 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -205,7 +205,7 @@ impl SyncEventStream for TestSync { impl NetworkBlock> for TestSync { fn announce_block(&self, _hash: Hash, _data: Option>) { - todo!(); + unimplemented!(); } fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { From 49240a2f457b03d37c745693c2cdbac2e7163ed4 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Wed, 8 Feb 2023 10:22:13 +0200 Subject: [PATCH 22/30] Fix warnings --- client/network/src/protocol.rs | 5 +---- client/network/sync/src/engine.rs | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5c94cfaa67299..119d512520954 100644 --- 
a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -41,10 +41,7 @@ use sc_network_common::{ use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ collections::{HashMap, HashSet, VecDeque}, - io, iter, - num::NonZeroUsize, - pin::Pin, - sync::Arc, + iter, task::Poll, }; diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index d5f90b11bde21..2f397b65c5767 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -51,7 +51,6 @@ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use sp_blockchain::HeaderMetadata; use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, CheckedSub, Header, NumberFor, Zero}, SaturatedConversion, }; From 17b68727cccd8f8c73947b299117b52c5a94b1fd Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 20 Feb 2023 12:14:52 +0200 Subject: [PATCH 23/30] Apply review comments --- Cargo.lock | 1 + client/network/src/behaviour.rs | 26 ++----- client/network/src/protocol.rs | 40 ++++------ client/network/src/service.rs | 32 ++++---- client/network/src/service/tests/mod.rs | 5 +- client/network/sync/Cargo.toml | 1 + client/network/sync/src/engine.rs | 78 +++++++++---------- client/network/sync/src/service/chain_sync.rs | 8 ++ client/network/test/src/lib.rs | 4 +- client/service/src/lib.rs | 56 ++++++++----- 10 files changed, 125 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3878e057cbbc4..e0b390d880939 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8666,6 +8666,7 @@ dependencies = [ "async-trait", "fork-tree", "futures", + "futures-timer", "libp2p", "log", "lru", diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index a22344570a32f..d3617d366868a 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -41,7 +41,6 @@ use sc_network_common::{ 
request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, }; use sc_peerset::{PeersetHandle, ReputationChange}; -use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, time::Duration}; @@ -50,13 +49,9 @@ pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, R /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut")] -pub struct Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ +pub struct Behaviour { /// All the substrate-specific protocols. - substrate: Protocol, + substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a /// cache. peer_info: peer_info::PeerInfoBehaviour, @@ -173,14 +168,10 @@ pub enum BehaviourOut { None, } -impl Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ +impl Behaviour { /// Builds a new `Behaviour`. pub fn new( - substrate: Protocol, + substrate: Protocol, user_agent: String, local_public_key: PublicKey, disco_config: DiscoveryConfig, @@ -248,12 +239,12 @@ where } /// Returns a shared reference to the user protocol. - pub fn user_protocol(&self) -> &Protocol { + pub fn user_protocol(&self) -> &Protocol { &self.substrate } /// Returns a mutable reference to the user protocol. 
- pub fn user_protocol_mut(&mut self) -> &mut Protocol { + pub fn user_protocol_mut(&mut self) -> &mut Protocol { &mut self.substrate } @@ -291,8 +282,8 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl From> for BehaviourOut { - fn from(event: CustomMessageOutcome) -> Self { +impl From for BehaviourOut { + fn from(event: CustomMessageOutcome) -> Self { match event { CustomMessageOutcome::NotificationStreamOpened { remote, @@ -318,7 +309,6 @@ impl From> for BehaviourOut { BehaviourOut::NotificationStreamClosed { remote, protocol }, CustomMessageOutcome::NotificationsReceived { remote, messages } => BehaviourOut::NotificationsReceived { remote, messages }, - CustomMessageOutcome::_PeerNewBest(_peer_id, _number) => BehaviourOut::None, CustomMessageOutcome::None => BehaviourOut::None, } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 119d512520954..274b225dfe4cb 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -31,14 +31,13 @@ use libp2p::{ use log::{debug, error, warn}; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; -use sc_client_api::HeaderBackend; use sc_network_common::{ config::NonReservedPeerMode, error, protocol::{role::Roles, ProtocolName}, sync::message::BlockAnnouncesHandshake, }; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::Block as BlockT; use std::{ collections::{HashMap, HashSet, VecDeque}, iter, @@ -68,9 +67,9 @@ mod rep { } // Lock must always be taken in order declared here. -pub struct Protocol { +pub struct Protocol { /// Pending list of messages to return from `poll` as a priority. - pending_messages: VecDeque>, + pending_messages: VecDeque, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, /// Handles opening the unique substream and sending and receiving raw messages. 
@@ -84,15 +83,12 @@ pub struct Protocol { /// solve this, an entry is added to this map whenever an invalid handshake is received. /// Entries are removed when the corresponding "substream closed" is later received. bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, + /// Connected peers. peers: HashMap, - _marker: std::marker::PhantomData, + _marker: std::marker::PhantomData, } -impl Protocol -where - B: BlockT, - Client: HeaderBackend + 'static, -{ +impl Protocol { /// Create a new instance. pub fn new( roles: Roles, @@ -180,8 +176,9 @@ where .chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone())) .collect(), bad_handshake_substreams: Default::default(), - _marker: Default::default(), peers: HashMap::new(), + // TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol` + _marker: Default::default(), }; Ok((protocol, peerset_handle, known_addresses)) @@ -340,7 +337,7 @@ where /// Outcome of an incoming custom message. #[derive(Debug)] #[must_use] -pub enum CustomMessageOutcome { +pub enum CustomMessageOutcome { /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, @@ -361,19 +358,13 @@ pub enum CustomMessageOutcome { NotificationStreamClosed { remote: PeerId, protocol: ProtocolName }, /// Messages have been received on one or more notifications protocols. NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> }, - /// Peer has a reported a new head of chain. - _PeerNewBest(PeerId, NumberFor), /// Now connected to a new peer for syncing purposes. 
None, } -impl NetworkBehaviour for Protocol -where - B: BlockT, - Client: HeaderBackend + 'static, -{ +impl NetworkBehaviour for Protocol { type ConnectionHandler = ::ConnectionHandler; - type OutEvent = CustomMessageOutcome; + type OutEvent = CustomMessageOutcome; fn new_handler(&mut self) -> Self::ConnectionHandler { self.behaviour.new_handler() @@ -468,7 +459,7 @@ where self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None }, - Err(_err) => { + Err(err) => { match as DecodeAll>::decode_all( &mut &received_handshake[..], ) { @@ -486,13 +477,14 @@ where notifications_sink, } }, - Err(err) => { + Err(err2) => { log::debug!( target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {}", + "Couldn't decode handshake sent by {}: {:?}: {} & {}", peer_id, received_handshake, err, + err2, ); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None @@ -574,7 +566,7 @@ where }, }; - if !matches!(outcome, CustomMessageOutcome::::None) { + if !matches!(outcome, CustomMessageOutcome::None) { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index bddde32dff71e..ff62c88eee8b3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -129,18 +129,19 @@ pub struct NetworkService { _block: PhantomData, } -impl NetworkWorker +impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, { /// Creates the network service. /// /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new + 'static>( + mut params: Params, + ) -> Result { // Private and public keys configuration. 
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -256,7 +257,7 @@ where let num_connected = Arc::new(AtomicUsize::new(0)); // Build the swarm. - let (mut swarm, bandwidth): (Swarm>, _) = { + let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = format!( "{} ({})", params.network_config.client_version, params.network_config.node_name @@ -411,14 +412,14 @@ where // Listen on multiaddresses. for addr in ¶ms.network_config.listen_addresses { - if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { + if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. for addr in ¶ms.network_config.public_addresses { - Swarm::>::add_external_address( + Swarm::>::add_external_address( &mut swarm, addr.clone(), AddressScore::Infinite, @@ -496,14 +497,14 @@ where /// Returns the local `PeerId`. pub fn local_peer_id(&self) -> &PeerId { - Swarm::>::local_peer_id(&self.network_service) + Swarm::>::local_peer_id(&self.network_service) } /// Returns the list of addresses we are listening on. /// /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. pub fn listen_addresses(&self) -> impl Iterator { - Swarm::>::listeners(&self.network_service) + Swarm::>::listeners(&self.network_service) } /// Get network state. @@ -583,7 +584,7 @@ where .collect() }; - let peer_id = Swarm::>::local_peer_id(swarm).to_base58(); + let peer_id = Swarm::>::local_peer_id(swarm).to_base58(); let listened_addresses = swarm.listeners().cloned().collect(); let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect(); @@ -1121,11 +1122,10 @@ enum ServiceToWorkerMsg { /// /// You are encouraged to poll this in a separate background thread or task. 
#[must_use = "The NetworkWorker must be polled in order for the network to advance"] -pub struct NetworkWorker +pub struct NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. external_addresses: Arc>>, @@ -1134,7 +1134,7 @@ where /// The network service that can be extracted and shared through the codebase. service: Arc>, /// The *actual* network. - network_service: Swarm>, + network_service: Swarm>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver, /// Senders for events that happen on the network. @@ -1153,11 +1153,10 @@ where _block: PhantomData, } -impl Future for NetworkWorker +impl Future for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, { type Output = (); @@ -1729,7 +1728,7 @@ where this.num_connected.store(num_connected_peers, Ordering::Relaxed); { let external_addresses = - Swarm::>::external_addresses(&this.network_service) + Swarm::>::external_addresses(&this.network_service) .map(|r| &r.addr) .cloned() .collect(); @@ -1767,11 +1766,10 @@ where } } -impl Unpin for NetworkWorker +impl Unpin for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend + 'static, { } diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index d357c5b6729ec..bf5260765ef3b 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -43,7 +43,7 @@ use substrate_test_runtime_client::{ #[cfg(test)] mod service; -type TestNetworkWorker = NetworkWorker; +type TestNetworkWorker = NetworkWorker; type TestNetworkService = NetworkService; const PROTOCOL_NAME: &str = "/foo"; @@ -222,7 +222,6 @@ impl TestNetworkBuilder { let worker = NetworkWorker::< substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::Hash, - 
substrate_test_runtime_client::TestClient, >::new(config::Params { block_announce_config, role: config::Role::Full, @@ -258,7 +257,7 @@ impl TestNetworkBuilder { } }); let stream = worker.service().event_stream("syncing"); - tokio::spawn(async move { engine.run(stream).await }); + tokio::spawn(engine.run(stream)); TestNetwork::new(worker) } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 532e873836204..ea3645fd65f24 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -20,6 +20,7 @@ array-bytes = "4.1" async-trait = "0.1.58" codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } futures = "0.3.21" +futures-timer = "3.0.2" libp2p = "0.50.0" log = "0.4.17" lru = "0.8.1" diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index 2bd4087abc417..be9f589d298e0 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -16,12 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! `SyncingEngine` is the actor responsible for syncing Substrate chain +//! to tip and keep the blockchain up to date with network updates. 
+ use crate::{ service::{self, chain_sync::ToServiceCommand}, ChainSync, ClientError, SyncingService, }; -use futures::{Stream, StreamExt}; +use futures::{FutureExt, Stream, StreamExt}; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{ @@ -29,6 +32,7 @@ use prometheus_endpoint::{ }; use codec::{Decode, DecodeAll, Encode}; +use futures_timer::Delay; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network_common::{ @@ -45,7 +49,7 @@ use sc_network_common::{ BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, PollBlockAnnounceValidation, SyncEvent, SyncMode, }, - utils::{interval, LruHashSet}, + utils::LruHashSet, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::HeaderMetadata; @@ -198,7 +202,7 @@ pub struct SyncingEngine { event_streams: Vec>, /// Interval at which we call `tick`. - tick_timeout: Pin + Send>>, + tick_timeout: Delay, /// All connected peers. Contains both full and light node peers. 
peers: HashMap>, @@ -359,7 +363,7 @@ where default_peers_set_num_full, default_peers_set_num_light, event_streams: Vec::new(), - tick_timeout: Box::pin(interval(TICK_TIMEOUT)), + tick_timeout: Delay::new(TICK_TIMEOUT), metrics: if let Some(r) = metrics_registry { match Metrics::register(r, is_major_syncing.clone()) { Ok(metrics) => Some(metrics), @@ -595,8 +599,9 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); - while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { + while let Poll::Ready(()) = self.tick_timeout.poll_unpin(cx) { self.report_metrics(); + self.tick_timeout.reset(TICK_TIMEOUT); } while let Poll::Ready(Some(event)) = event_stream.poll_next_unpin(cx) { @@ -714,51 +719,34 @@ where ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), ToServiceCommand::NewBestBlockImported(hash, number) => self.new_best_block_imported(hash, number), - ToServiceCommand::Status(tx) => - if let Err(_) = tx.send(self.chain_sync.status()) { - log::warn!(target: "sync", "Failed to respond to `Status` query"); - }, + ToServiceCommand::Status(tx) => { + let _ = tx.send(self.chain_sync.status()); + }, ToServiceCommand::NumActivePeers(tx) => { - if let Err(_) = tx.send(self.chain_sync.num_active_peers()) { - log::warn!(target: "sync", "response channel closed for `NumActivePeers`"); - } + let _ = tx.send(self.chain_sync.num_active_peers()); }, ToServiceCommand::SyncState(tx) => { - if let Err(_) = tx.send(self.chain_sync.status()) { - log::warn!(target: "sync", "response channel closed for `SyncState`"); - } + let _ = tx.send(self.chain_sync.status()); }, ToServiceCommand::BestSeenBlock(tx) => { - if let Err(_) = tx.send(self.chain_sync.status().best_seen_block) { - log::warn!(target: "sync", "response channel closed for `BestSeenBlock`"); - } + let _ = tx.send(self.chain_sync.status().best_seen_block); }, ToServiceCommand::NumSyncPeers(tx) => { - if let Err(_) = 
tx.send(self.chain_sync.status().num_peers) { - log::warn!(target: "sync", "response channel closed for `NumSyncPeers`"); - } + let _ = tx.send(self.chain_sync.status().num_peers); }, ToServiceCommand::NumQueuedBlocks(tx) => { - if let Err(_) = tx.send(self.chain_sync.status().queued_blocks) { - log::warn!(target: "sync", "response channel closed for `NumQueuedBlocks`"); - } + let _ = tx.send(self.chain_sync.status().queued_blocks); }, ToServiceCommand::NumDownloadedBlocks(tx) => { - if let Err(_) = tx.send(self.chain_sync.num_downloaded_blocks()) { - log::warn!(target: "sync", "response channel closed for `NumDownloadedBlocks`"); - } + let _ = tx.send(self.chain_sync.num_downloaded_blocks()); }, ToServiceCommand::NumSyncRequests(tx) => { - if let Err(_) = tx.send(self.chain_sync.num_sync_requests()) { - log::warn!(target: "sync", "response channel closed for `NumSyncRequests`"); - } + let _ = tx.send(self.chain_sync.num_sync_requests()); }, ToServiceCommand::PeersInfo(tx) => { let peers_info = self.peers.iter().map(|(id, peer)| (*id, peer.info.clone())).collect(); - if let Err(_) = tx.send(peers_info) { - log::warn!(target: "sync", "response channel closed for `PeersInfo`"); - } + let _ = tx.send(peers_info); }, ToServiceCommand::OnBlockFinalized(hash, header) => self.chain_sync.on_block_finalized(&hash, *header.number()), @@ -782,7 +770,7 @@ where log::debug!(target: "sync", "{} disconnected", peer); } - if let Some(_peer_data) = self.peers.remove(&peer) { + if self.peers.remove(&peer).is_some() { self.chain_sync.peer_disconnected(&peer); self.default_peers_set_no_slot_connected_peers.remove(&peer); self.event_streams @@ -812,17 +800,19 @@ where } if status.genesis_hash != self.genesis_hash { - log::log!( - target: "sync", - if self.important_peers.contains(&who) { log::Level::Warn } else { log::Level::Debug }, - "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, status.genesis_hash - ); self.network_service.report_peer(who, 
rep::GENESIS_MISMATCH); self.network_service .disconnect_peer(who, self.block_announce_protocol_name.clone()); - if self.boot_node_ids.contains(&who) { + if self.important_peers.contains(&who) { + log::error!( + target: "sync", + "Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, + ); + } else if self.boot_node_ids.contains(&who) { log::error!( target: "sync", "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", @@ -830,6 +820,12 @@ where self.genesis_hash, status.genesis_hash, ); + } else { + log::debug!( + target: "sync", + "Peer is on different chain (our genesis: {} theirs: {})", + self.genesis_hash, status.genesis_hash + ); } return Err(()) diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs index 1ce4956426711..f381acf66e973 100644 --- a/client/network/sync/src/service/chain_sync.rs +++ b/client/network/sync/src/service/chain_sync.rs @@ -81,6 +81,7 @@ impl SyncingService { Self { tx, num_connected, is_major_syncing } } + /// Get the number of active peers. pub async fn num_active_peers(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumActivePeers(tx)); @@ -88,6 +89,7 @@ impl SyncingService { rx.await } + /// Get best seen block. pub async fn best_seen_block(&self) -> Result>, oneshot::Canceled> { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::BestSeenBlock(tx)); @@ -95,6 +97,7 @@ impl SyncingService { rx.await } + /// Get the number of sync peers. pub async fn num_sync_peers(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncPeers(tx)); @@ -102,6 +105,7 @@ impl SyncingService { rx.await } + /// Get the number of queued blocks. 
pub async fn num_queued_blocks(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumQueuedBlocks(tx)); @@ -109,6 +113,7 @@ impl SyncingService { rx.await } + /// Get the number of downloaded blocks. pub async fn num_downloaded_blocks(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumDownloadedBlocks(tx)); @@ -116,6 +121,7 @@ impl SyncingService { rx.await } + /// Get the number of sync requests. pub async fn num_sync_requests(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::NumSyncRequests(tx)); @@ -123,6 +129,7 @@ impl SyncingService { rx.await } + /// Get peer information. pub async fn peers_info( &self, ) -> Result)>, oneshot::Canceled> { @@ -132,6 +139,7 @@ impl SyncingService { rx.await } + /// Notify the `SyncingEngine` that a block has been finalized. pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) { let _ = self.tx.unbounded_send(ToServiceCommand::OnBlockFinalized(hash, header)); } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 4688c5705cf74..f44bdcb1e29be 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -241,7 +241,7 @@ pub struct Peer { block_import: BlockImportAdapter, select_chain: Option>, backend: Option>, - network: NetworkWorker::Hash, PeersFullClient>, + network: NetworkWorker::Hash>, sync_service: Arc>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, @@ -532,7 +532,7 @@ where } /// Get a reference to the network worker. 
- pub fn network(&self) -> &NetworkWorker::Hash, PeersFullClient> { + pub fn network(&self) -> &NetworkWorker::Hash> { &self.network } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 96ec3eb012816..1a69363fb95da 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -155,7 +155,7 @@ async fn build_network_future< H: sc_network_common::ExHashT, >( role: Role, - mut network: sc_network::NetworkWorker, + mut network: sc_network::NetworkWorker, client: Arc, mut rpc_rx: TracingUnboundedReceiver>, sync_service: Arc>, @@ -202,11 +202,16 @@ async fn build_network_future< request = rpc_rx.select_next_some() => { match request { sc_rpc::system::Request::Health(sender) => { - let _ = sender.send(sc_rpc::system::Health { - peers: sync_service.peers_info().await.expect("syncing to stay active").len(), - is_syncing: sync_service.is_major_syncing(), - should_have_peers, - }); + match sync_service.peers_info().await { + Ok(info) => { + let _ = sender.send(sc_rpc::system::Health { + peers: info.len(), + is_syncing: sync_service.is_major_syncing(), + should_have_peers, + }); + } + Err(_) => log::error!("`SyncingEngine` shut down"), + } }, sc_rpc::system::Request::LocalPeerId(sender) => { let _ = sender.send(network.local_peer_id().to_base58()); @@ -220,14 +225,19 @@ async fn build_network_future< let _ = sender.send(addresses); }, sc_rpc::system::Request::Peers(sender) => { - let _ = sender.send(sync_service.peers_info().await.expect("syncing to stay active").into_iter().map(|(peer_id, p)| - sc_rpc::system::PeerInfo { - peer_id: peer_id.to_base58(), - roles: format!("{:?}", p.roles), - best_hash: p.best_hash, - best_number: p.best_number, - } - ).collect()); + match sync_service.peers_info().await { + Ok(info) => { + let _ = sender.send(info.into_iter().map(|(peer_id, p)| + sc_rpc::system::PeerInfo { + peer_id: peer_id.to_base58(), + roles: format!("{:?}", p.roles), + best_hash: p.best_hash, + best_number: p.best_number, + } + 
).collect()); + }, + Err(_) => log::error!("`SyncingEngine` shut down"), + } } sc_rpc::system::Request::NetworkState(sender) => { if let Ok(network_state) = serde_json::to_value(&network.network_state()) { @@ -278,13 +288,17 @@ async fn build_network_future< sc_rpc::system::Request::SyncState(sender) => { use sc_rpc::system::SyncState; - let best_number = client.info().best_number; - - let _ = sender.send(SyncState { - starting_block, - current_block: best_number, - highest_block: sync_service.best_seen_block().await.expect("syncing to stay active").unwrap_or(best_number), - }); + match sync_service.best_seen_block().await { + Ok(best_seen_block) => { + let best_number = client.info().best_number; + let _ = sender.send(SyncState { + starting_block, + current_block: best_number, + highest_block: best_seen_block.unwrap_or(best_number), + }); + } + Err(_) => log::error!("`SyncingEngine` shut down"), + } } } } From 721cc49b577bef083d08b4478fbfc28aebb51e5e Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 20 Feb 2023 12:24:41 +0200 Subject: [PATCH 24/30] Fix docs --- client/network/sync/src/engine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index be9f589d298e0..e7525e8cc6ba8 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -487,7 +487,7 @@ where /// /// It is required that [`ChainSync::poll_block_announce_validation`] is /// called later to check for finished validations. The result of the validation - /// needs to be passed to [`Protocol::process_block_announce_validation_result`] + /// needs to be passed to [`SyncingEngine::process_block_announce_validation_result`] /// to finish the processing. 
/// /// # Note From 6851852c13413ffce9987492c5daafdd83bcf61c Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Mon, 20 Feb 2023 12:59:00 +0200 Subject: [PATCH 25/30] Fix test --- .../src/protocol/notifications/behaviour.rs | 90 ++++++++++--------- client/network/src/service/tests/mod.rs | 2 +- 2 files changed, 49 insertions(+), 43 deletions(-) diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index e8165178106da..e6356b39b199f 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -2995,48 +2995,54 @@ mod tests { } } - // #[tokio::test] - // async fn write_notification() { - // let (mut notif, _peerset) = development_notifs(); - // let peer = PeerId::random(); - // let conn = ConnectionId::new(0usize); - // let set_id = sc_peerset::SetId::from(0); - // let connected = ConnectedPoint::Listener { - // local_addr: Multiaddr::empty(), - // send_back_addr: Multiaddr::empty(), - // }; - // let mut conn_yielder = ConnectionYielder::new(); - - // notif.on_swarm_event(FromSwarm::ConnectionEstablished( - // libp2p::swarm::behaviour::ConnectionEstablished { - // peer_id: peer, - // connection_id: conn, - // endpoint: &connected, - // failed_addresses: &[], - // other_established: 0usize, - // }, - // )); - // assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); - - // notif.peerset_report_connect(peer, set_id); - // assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - - // notif.on_connection_handler_event( - // peer, - // conn, - // conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]), - // ); - - // if let Some(PeerState::Enabled { ref connections, .. 
}) = notif.peers.get(&(peer, set_id)) { - // assert_eq!(connections[0].0, conn); - // assert!(std::matches!(connections[0].1, ConnectionState::Open(_))); - // } else { - // panic!("invalid state"); - // } - - // notif.write_sync_notification(&peer, set_id, vec![1, 3, 3, 7]); - // assert_eq!(conn_yielder.get_next_event(peer, set_id.into()).await, Some(vec![1, 3, 3, 7])); - // } + #[tokio::test] + async fn write_notification() { + let (mut notif, _peerset) = development_notifs(); + let peer = PeerId::random(); + let conn = ConnectionId::new(0usize); + let set_id = sc_peerset::SetId::from(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + let mut conn_yielder = ConnectionYielder::new(); + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + notif.peerset_report_connect(peer, set_id); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); + + notif.on_connection_handler_event( + peer, + conn, + conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]), + ); + + if let Some(PeerState::Enabled { ref connections, .. 
}) = notif.peers.get(&(peer, set_id)) { + assert_eq!(connections[0].0, conn); + assert!(std::matches!(connections[0].1, ConnectionState::Open(_))); + } else { + panic!("invalid state"); + } + + notif + .peers + .get(&(peer, set_id)) + .unwrap() + .get_open() + .unwrap() + .send_sync_notification(vec![1, 3, 3, 7]); + assert_eq!(conn_yielder.get_next_event(peer, set_id.into()).await, Some(vec![1, 3, 3, 7])); + } #[test] fn peerset_report_connect_backoff_expired() { diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index bf5260765ef3b..2d90cbc537825 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -37,7 +37,7 @@ use sp_runtime::traits::{Block as BlockT, Header as _}; use std::sync::Arc; use substrate_test_runtime_client::{ runtime::{Block as TestBlock, Hash as TestHash}, - TestClient, TestClientBuilder, TestClientBuilderExt as _, + TestClientBuilder, TestClientBuilderExt as _, }; #[cfg(test)] From e105fe5f68f1509fb836cbd29121daf5158db13c Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Thu, 2 Mar 2023 12:36:23 +0200 Subject: [PATCH 26/30] cargo-fmt --- bin/node-template/node/src/service.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index e281cb3b9cd98..34e4e566d92fc 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -59,7 +59,7 @@ pub fn new_partial( ServiceError, > { if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())); + return Err(ServiceError::Other("Remote Keystores are not supported.".into())) } let telemetry = config @@ -170,12 +170,11 @@ pub fn new_full(mut config: Configuration) -> Result if let Some(url) = &config.keystore_remote { match remote_keystore(url) { Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { + Err(e) => return 
Err(ServiceError::Other(format!( "Error hooking up remote keystore for {}: {}", url, e - ))) - }, + ))), }; } let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( From d21fffcb8ed0412ab1bf7472d4d1d7d846f7a65c Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Sun, 5 Mar 2023 12:19:58 +0200 Subject: [PATCH 27/30] Update client/network/sync/src/engine.rs Co-authored-by: Anton --- client/network/sync/src/engine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index e7525e8cc6ba8..a1c344f7148d7 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify From 198a695757e96576d5b639f3b535cca63caaf796 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Sun, 5 Mar 2023 12:20:08 +0200 Subject: [PATCH 28/30] Update client/network/sync/src/engine.rs Co-authored-by: Anton --- client/network/sync/src/engine.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index a1c344f7148d7..0eb71641e53fb 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -138,7 +138,7 @@ impl Metrics { pub struct MajorSyncingGauge(Arc); impl MajorSyncingGauge { - /// Registers the `MajorSyncGauge` metric whose value is + /// Registers the [`MajorSyncGauge`] metric whose value is /// obtained from the given `AtomicBool`. 
fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { prometheus_endpoint::register( From 7c4babc018c1c9dc9055c28a9ae7df21467fbcbd Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sun, 5 Mar 2023 13:14:35 +0200 Subject: [PATCH 29/30] Add missing docs --- client/network/common/src/sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 556933d744969..262da6c202aa3 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -267,6 +267,7 @@ impl fmt::Debug for OpaqueBlockResponse { } } +/// Provides high-level status of syncing. #[async_trait::async_trait] pub trait SyncStatusProvider: Send + Sync { /// Get high-level view of the syncing status. From d028177cd9ccb4da6c1010d5aa691118c5b40dc2 Mon Sep 17 00:00:00 2001 From: Aaro Altonen Date: Sun, 5 Mar 2023 13:16:46 +0200 Subject: [PATCH 30/30] Refactor code --- client/network/sync/src/engine.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/client/network/sync/src/engine.rs b/client/network/sync/src/engine.rs index e7525e8cc6ba8..75ac34dab7a3a 100644 --- a/client/network/sync/src/engine.rs +++ b/client/network/sync/src/engine.rs @@ -511,15 +511,14 @@ where return }, }; - peer.known_blocks.insert(hash); - let is_best = match announce.state.unwrap_or(BlockState::Best) { - BlockState::Best => true, - BlockState::Normal => false, - }; - if peer.info.roles.is_full() { + let is_best = match announce.state.unwrap_or(BlockState::Best) { + BlockState::Best => true, + BlockState::Normal => false, + }; + self.chain_sync.push_block_announce_validation(who, hash, announce, is_best); } }