From 37cf4f0edc3ca1a89e16018b921c41fc4cd31563 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 21 Jul 2022 18:17:06 +0200 Subject: [PATCH 01/14] Pallet tests files --- bin/node-sassafras/node/src/chain_spec.rs | 3 +- frame/sassafras/src/lib.rs | 29 ++--- frame/sassafras/src/mock.rs | 138 ++++++++++++++++++++++ frame/sassafras/src/tests.rs | 41 +++++++ primitives/consensus/sassafras/src/lib.rs | 9 ++ 5 files changed, 202 insertions(+), 18 deletions(-) create mode 100644 frame/sassafras/src/mock.rs create mode 100644 frame/sassafras/src/tests.rs diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index ed189a6964976..76c578d5a2a1d 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -141,10 +141,9 @@ fn testnet_genesis( // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), }, - sassafras: SassafrasConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), - epoch_config: Some(node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG), + epoch_config: node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG, }, grandpa: GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 06155ec86877d..7a4c618a2c41e 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -1,4 +1,4 @@ -// Sassafras This file is part of Substrate. +// This file is part of Substrate. // Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Consensus extension module for Sassafras consensus. +//! Extension module for Sassafras consensus. //! //! 
Sassafras is a constant-time block production protocol that aims to ensure that //! there is exactly one block produced with constant time intervals rather multiple @@ -43,8 +43,9 @@ //! To anonymously publish the ticket to the chain a validator sends their tickets //! to a random validator who later puts it on-chain as a transaction. -#![deny(warnings)] -#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] +// TODO-SASS-P2 +//#![deny(warnings)] +//#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] use scale_codec::{Decode, Encode}; @@ -67,13 +68,10 @@ pub use sp_consensus_sassafras::{ }; // TODO-SASS-P2: tests and benches - -//#[cfg(test)] -//mod mock; -// -//#[cfg(test)] -//mod tests; -// +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; //#[cfg(feature = "runtime-benchmarks")] //mod benchmarking; @@ -231,7 +229,7 @@ pub mod pallet { /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. #[pallet::storage] - pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>; /// Current session tickets. #[pallet::storage] @@ -251,16 +249,14 @@ pub mod pallet { /// Genesis authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Genesis epoch configuration. - pub epoch_config: Option, + pub epoch_config: SassafrasEpochConfiguration, } #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); - EpochConfig::::put( - self.epoch_config.clone().expect("epoch_config must not be None"), - ); + EpochConfig::::put(self.epoch_config.clone()); } } @@ -622,6 +618,7 @@ impl Pallet { }) .next(); + // ANDRE // TODO-SASS-P2: maybe here we have to assert! the presence of pre_digest... 
// Every valid sassafras block should come with a pre-digest diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs new file mode 100644 index 0000000000000..64e7af3b55c2d --- /dev/null +++ b/frame/sassafras/src/mock.rs @@ -0,0 +1,138 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras consensus. + +// TODO-SASS-P2 remove +#![allow(unused_imports)] + +use crate::{self as pallet_sassafras, Authorities, Config}; + +use frame_support::{ + parameter_types, + traits::{ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnInitialize}, +}; +use scale_codec::Encode; +use sp_consensus_sassafras::{AuthorityId, AuthorityPair, Slot}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::{ + crypto::{IsWrappedBy, KeyTypeId, Pair}, + H256, U256, +}; +use sp_runtime::{ + curve::PiecewiseLinear, + impl_opaque_keys, + testing::{Digest, DigestItem, Header, TestXt}, + traits::{Header as _, IdentityLookup, OpaqueKeys}, + Perbill, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +type DummyValidatorId = u64; + +type AccountData = u128; + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} + +impl frame_system::Config for Test { + type Event = Event; + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Version = (); + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = DummyValidatorId; + type Lookup = IdentityLookup; + type Header = Header; + type BlockHashCount = ConstU64<250>; + type PalletInfo = PalletInfo; + type AccountData = AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); //Sassafras; + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = TestXt; +} + +impl pallet_sassafras::Config for Test { + type EpochDuration = ConstU64<3>; + type ExpectedBlockTime = ConstU64<1>; + type EpochChangeTrigger = crate::SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32<3>; + type MaxSubmittedTickets = ConstU32<3>; +} + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len).1 +} + +pub fn new_test_ext_with_pairs( + authorities_len: usize, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| 
AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities = pairs.iter().map(|p| (p.public(), 1)).collect(); + + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let config = pallet_sassafras::GenesisConfig { authorities, epoch_config: Default::default() }; + >::assimilate_storage(&config, &mut t) + .unwrap(); + + (pairs, t.into()) +} diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs new file mode 100644 index 0000000000000..8d9ce6acd6940 --- /dev/null +++ b/frame/sassafras/src/tests.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras consensus. 
+ +// TODO-SASS-P2 remove +#![allow(unused_imports)] + +use super::{Call, *}; +use frame_support::{ + assert_err, assert_noop, assert_ok, + traits::{Currency, EstimateNextSessionRotation, OnFinalize}, + weights::{GetDispatchInfo, Pays}, +}; +use mock::*; +use pallet_session::ShouldEndSession; +use sp_consensus_sassafras::{SassafrasEpochConfiguration, Slot}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::crypto::Pair; + +#[test] +fn genesis_values() { + new_test_ext(4).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 4); + assert_eq!(EpochConfig::::get(), Default::default()); + }); +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 0546c99c52984..5ecae01fe4da7 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -108,6 +108,15 @@ pub struct SassafrasEpochConfiguration { // L: bound on aa number of tickets that can be gossiped } +// Sensible defaults for Sassafras epoch configuration. +impl Default for SassafrasEpochConfiguration { + fn default() -> Self { + SassafrasEpochConfiguration { + // TODO-SASS-P2 + } + } +} + /// Ticket type. 
pub type Ticket = VRFOutput; From e29b9a6ef211e45c47bfb2ccd3c6ac7c6d14346c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 26 Jul 2022 18:22:07 +0200 Subject: [PATCH 02/14] Pallet tests for epoch change and tickets submission/enact/claim --- Cargo.lock | 1 + bin/node-sassafras/runtime/src/lib.rs | 2 - client/consensus/sassafras/src/authorship.rs | 4 +- client/consensus/sassafras/src/lib.rs | 8 +- .../consensus/sassafras/src/verification.rs | 4 +- frame/sassafras/Cargo.toml | 1 + frame/sassafras/src/lib.rs | 219 +++++++------- frame/sassafras/src/mock.rs | 126 +++++++- frame/sassafras/src/tests.rs | 280 +++++++++++++++++- primitives/consensus/sassafras/src/digests.rs | 8 +- 10 files changed, 510 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1819c85d7bef..d88ea66c7653f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6253,6 +6253,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "log", "pallet-session", "pallet-timestamp", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c458605375ab1..7806e0652c649 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -235,8 +235,6 @@ impl pallet_sassafras::Config for Runtime { type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; type MaxAuthorities = ConstU32; type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; - // TODO-SASS-P4. Add some redundancy before starting tickets drop. 
- type MaxSubmittedTickets = ConstU32<{ 3 * EPOCH_DURATION_IN_SLOTS as u32 }>; } impl pallet_grandpa::Config for Runtime { diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index c8f39497ffa5e..34f10513b2258 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -75,8 +75,8 @@ pub fn claim_slot( let pre_digest = PreDigest { authority_index: authority_index as u32, slot, - block_vrf_output: VRFOutput(signature.output), - block_vrf_proof: VRFProof(signature.proof.clone()), + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof.clone()), ticket_info, }; Some((pre_digest, authority_id.clone())) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c83b84cb0ff37..083a713034372 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -746,13 +746,13 @@ pub fn find_pre_digest(header: &B::Header) -> Result( let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| { - p.vrf_verify(transcript, &pre_digest.block_vrf_output, &pre_digest.block_vrf_proof) - }) + .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; let info = VerifiedHeaderInfo { diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 1d3839a9dcfb9..b41803cd3bbef 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -31,6 +31,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +hex-literal = "0.3" [features] default = ["std"] diff --git 
a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 7a4c618a2c41e..cd2deb0d88f8c 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -77,36 +77,6 @@ mod tests; pub use pallet::*; -/// Trigger an epoch change, if any should take place. -pub trait EpochChangeTrigger { - /// Trigger an epoch change, if any should take place. This should be called - /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); -} - -/// A type signifying to Sassafras that an external trigger for epoch changes -/// (e.g. pallet-session) is used. -pub struct ExternalTrigger; - -impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. -} - -/// A type signifying to Sassafras that it should perform epoch changes with an internal -/// trigger, recycling the same authorities forever. -pub struct SameAuthoritiesForever; - -impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); - let next_authorities = authorities.clone(); - - >::enact_epoch_change(authorities, next_authorities); - } - } -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -150,10 +120,6 @@ pub mod pallet { /// Max number of tickets that are considered for each epoch. #[pallet::constant] type MaxTickets: Get; - - /// Max number of tickets that we are going to consider for each epoch. - #[pallet::constant] - type MaxSubmittedTickets: Get; } // TODO-SASS-P2 @@ -216,7 +182,7 @@ pub mod pallet { #[pallet::storage] pub type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; - /// Current epoch randomness accumulator. + /// Randomness accumulator. #[pallet::storage] pub type RandomnessAccumulator = StorageValue<_, schnorrkel::Randomness, ValueQuery>; @@ -224,7 +190,7 @@ pub mod pallet { /// if per-block initialization has already been called for current block. 
#[pallet::storage] #[pallet::getter(fn initialized)] - pub type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, PreDigest>; /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. @@ -240,7 +206,7 @@ pub mod pallet { // Each map entry contains a vector of tickets as they are received. #[pallet::storage] pub type NextTickets = - StorageValue<_, BoundedBTreeSet, ValueQuery>; + StorageValue<_, BoundedBTreeSet, ValueQuery>; /// Genesis configuration for Sassafras protocol. #[cfg_attr(feature = "std", derive(Default))] @@ -273,49 +239,41 @@ pub mod pallet { // At the end of the block, we can safely include the new VRF output from // this block into the randomness accumulator. If we've determined // that this block was the first in a new epoch, the changeover logic has - // already occurred at this point, so the under-construction randomness + // already occurred at this point, so the + // + // TODO-SASS-P2 + // under-construction randomness // will only contain outputs from the right epoch. - // TODO-SASS-P2: maybe here we can `expect` that is initialized (panic if not) - if let Some(pre_digest) = Initialized::::take().flatten() { - let authority_index = pre_digest.authority_index; - - let randomness: Option = Authorities::::get() - .get(authority_index as usize) - .and_then(|(authority, _)| { - schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() - }) - .and_then(|pubkey| { - let current_slot = CurrentSlot::::get(); - - let transcript = sp_consensus_sassafras::make_slot_transcript( - &Self::randomness(), - current_slot, - EpochIndex::::get(), - ); - - let vrf_output = pre_digest.block_vrf_output; - - // This has already been verified by the client on block import. 
- debug_assert!(pubkey - .vrf_verify( - transcript.clone(), - &vrf_output, - &pre_digest.block_vrf_proof - ) - .is_ok()); - - vrf_output.0.attach_input_hash(&pubkey, transcript).ok() - }) - .map(|inout| { - inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX) - }); - - // TODO-SASS-P2: this should be infallible. Randomness should be always deposited. - // Eventually better to panic here? - if let Some(randomness) = randomness { - Self::deposit_randomness(&randomness); - } - } + let pre_digest = Initialized::::take() + .expect("Finalization is called after initialization; qed."); + + let randomness = Authorities::::get() + .get(pre_digest.authority_index as usize) + .and_then(|(authority, _)| { + schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() + }) + .and_then(|pubkey| { + let current_slot = CurrentSlot::::get(); + + let transcript = sp_consensus_sassafras::make_slot_transcript( + &Self::randomness(), + current_slot, + EpochIndex::::get(), + ); + + let vrf_output = pre_digest.vrf_output; + + // This has already been verified by the client on block import. + debug_assert!(pubkey + .vrf_verify(transcript.clone(), &vrf_output, &pre_digest.vrf_proof) + .is_ok()); + + vrf_output.0.attach_input_hash(&pubkey, transcript).ok() + }) + .map(|inout| inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX)) + .expect("Pre-digest contains valid randomness; qed"); + + Self::deposit_randomness(&randomness); } } @@ -439,6 +397,11 @@ impl Pallet { // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having // started at the slot of block 1. We want to use the same randomness and validator set as // signalled in the genesis, so we don't rotate the epoch. + + // TODO-SASS-P2 + // Is now != One required??? + // What if we want epochs with len = 1. In this case we doesn't change epoch correctly + // in slot 1. 
now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() } @@ -491,12 +454,6 @@ impl Pallet { let randomness = Self::randomness_change_epoch(next_epoch_index); Randomness::::put(randomness); - // // Update the start blocks of the previous and new current epoch. - // >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { - // *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); - // *current_epoch_start_block = >::block_number(); - // }); - // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. @@ -574,33 +531,32 @@ impl Pallet { // TODO-SASS-P2: temporary fix to make the compiler happy #[allow(dead_code)] fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { - if !authorities.is_empty() { - assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); - let bounded_authorities = - WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) - .expect("Initial number of authorities should be lower than T::MaxAuthorities"); - Authorities::::put(&bounded_authorities); - NextAuthorities::::put(&bounded_authorities); - } + //if !authorities.is_empty() { + assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); + //} } fn initialize_genesis_epoch(genesis_slot: Slot) { GenesisSlot::::put(genesis_slot); - debug_assert_ne!(*GenesisSlot::::get(), 0); - // Deposit a log because this is the first block in epoch #0. We use the same values - // as genesis because we haven't collected any randomness yet. + // Deposit a log because this is the first block in epoch #0. 
+ // We use the same values as genesis because we haven't collected any randomness yet. let next = NextEpochDescriptor { authorities: Self::authorities().to_vec(), randomness: Self::randomness(), }; - Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } fn initialize(now: T::BlockNumber) { // Since `initialize` can be called twice (e.g. if session module is present) - // let's ensure that we only do the initialization once per block + // let's ensure that we only do the initialization once per block. + // TODO-SASS-P2: why session calls initialize? if Self::initialized().is_some() { return } @@ -618,26 +574,19 @@ impl Pallet { }) .next(); - // ANDRE - // TODO-SASS-P2: maybe here we have to assert! the presence of pre_digest... - // Every valid sassafras block should come with a pre-digest - - if let Some(ref pre_digest) = pre_digest { - // The slot number of the current block being initialized - let current_slot = pre_digest.slot; - - // On the first non-zero block (i.e. block #1) this is where the first epoch - // (epoch #0) actually starts. We need to adjust internal storage accordingly. - if *GenesisSlot::::get() == 0 { - Self::initialize_genesis_epoch(current_slot) - } + let pre_digest = pre_digest.expect("Valid Sassafras block should have a pre-digest. qed"); // let Some(ref pre_digest) = pre_digest { + let current_slot = pre_digest.slot; + CurrentSlot::::put(current_slot); - CurrentSlot::::put(current_slot); + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + Self::initialize_genesis_epoch(current_slot) } Initialized::::put(pre_digest); - // enact epoch change, if necessary. + // Enact epoch change, if necessary. 
T::EpochChangeTrigger::trigger::(now); } @@ -678,19 +627,21 @@ impl Pallet { ticket_idx as usize }; - // If this is a ticket for an epoch not enacted yet we have to fetch it from the - // `NextTickets` list. For example, this may happen when an author request the first - // ticket of a new epoch. if slot_idx < duration { + // Get a ticket for the current epoch. let tickets = Tickets::::get(); let idx = ticket_index(slot_idx); tickets.get(idx).cloned() - } else { + } else if slot_idx < 2 * duration { + // Get a ticket for the next epoch. Since its state values were not enacted yet, we + // have to fetch it from the `NextTickets` list. This may happen when an author request + // the first ticket of a new epoch. let tickets = NextTickets::::get(); - // Do not use modulus since we want to eventually return `None` for slots crossing the - // epoch boundaries. let idx = ticket_index(slot_idx - duration); tickets.iter().nth(idx).cloned() + } else { + // We have no tickets for the requested slot yet. + None } } @@ -702,6 +653,36 @@ impl Pallet { } } +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); +} + +/// A type signifying to Sassafras that an external trigger for epoch changes +/// (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. +} + +/// A type signifying to Sassafras that it should perform epoch changes with an internal +/// trigger, recycling the same authorities forever. 
+pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: T::BlockNumber) { + if >::should_epoch_change(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_epoch_change(authorities, next_authorities); + } + } +} + impl BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 64e7af3b55c2d..4ad994184174b 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -20,11 +20,13 @@ // TODO-SASS-P2 remove #![allow(unused_imports)] -use crate::{self as pallet_sassafras, Authorities, Config}; +use crate::{self as pallet_sassafras, Authorities, Config, SameAuthoritiesForever}; use frame_support::{ parameter_types, - traits::{ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + traits::{ + ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize, + }, }; use scale_codec::Encode; use sp_consensus_sassafras::{AuthorityId, AuthorityPair, Slot}; @@ -41,6 +43,9 @@ use sp_runtime::{ Perbill, }; +const EPOCH_DURATION: u64 = 10; +const MAX_TICKETS: u32 = 6; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -96,12 +101,11 @@ where } impl pallet_sassafras::Config for Test { - type EpochDuration = ConstU64<3>; + type EpochDuration = ConstU64; type ExpectedBlockTime = ConstU64<1>; - type EpochChangeTrigger = crate::SameAuthoritiesForever; + type EpochChangeTrigger = SameAuthoritiesForever; type MaxAuthorities = ConstU32<10>; - type MaxTickets = ConstU32<3>; - type MaxSubmittedTickets = ConstU32<3>; + type MaxTickets = ConstU32; } frame_support::construct_runtime!( @@ -136,3 +140,113 @@ pub fn new_test_ext_with_pairs( (pairs, t.into()) } + +fn make_ticket_vrf( + slot: Slot, + attempt: u64, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> 
(VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let epoch_start = Sassafras::current_epoch_start(); + // Check if epoch index is going to change on initialization + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += 1; + } + + let transcript = + sp_consensus_sassafras::make_ticket_transcript(&Sassafras::randomness(), attempt, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_tickets( + slot: Slot, + attempts: u64, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> Vec<(VRFOutput, VRFProof)> { + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_vrf(slot, attempt, pair)) + .collect() +} + +fn make_slot_vrf( + slot: Slot, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> (VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let epoch_start = Sassafras::current_epoch_start(); + // Check if epoch index is going to change on initialization + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += 1; + } + + let transcript = + sp_consensus_sassafras::make_slot_transcript(&Sassafras::randomness(), slot, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_pre_digest( + authority_index: sp_consensus_sassafras::AuthorityIndex, + slot: sp_consensus_sassafras::Slot, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> sp_consensus_sassafras::digests::PreDigest { + let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); + sp_consensus_sassafras::digests::PreDigest { + authority_index, + slot, + vrf_output, + vrf_proof, + ticket_info: None, + } +} + +pub fn make_wrapped_pre_digest( + authority_index: 
sp_consensus_sassafras::AuthorityIndex, + slot: sp_consensus_sassafras::Slot, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> Digest { + let pre_digest = make_pre_digest(authority_index, slot, pair); + let log = + DigestItem::PreRuntime(sp_consensus_sassafras::SASSAFRAS_ENGINE_ID, pre_digest.encode()); + Digest { logs: vec![log] } +} + +pub fn go_to_block(number: u64, slot: u64, pair: &sp_consensus_sassafras::AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_wrapped_pre_digest(0, slot.into(), pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Slots will grow accordingly to blocks +pub fn progress_to_block( + number: u64, + pair: &sp_consensus_sassafras::AuthorityPair, +) -> Option { + let mut slot = u64::from(Sassafras::current_slot()) + 1; + let mut digest = None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot += 1; + } + digest +} diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 8d9ce6acd6940..cf20eea03a272 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -20,13 +20,15 @@ // TODO-SASS-P2 remove #![allow(unused_imports)] -use super::{Call, *}; +use crate::*; +use mock::*; + use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{Currency, EstimateNextSessionRotation, OnFinalize}, + traits::{Currency, EstimateNextSessionRotation, OnFinalize, OnInitialize}, weights::{GetDispatchInfo, Pays}, }; -use mock::*; +use hex_literal::hex; use pallet_session::ShouldEndSession; use sp_consensus_sassafras::{SassafrasEpochConfiguration, Slot}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; @@ -39,3 +41,275 @@ fn genesis_values() { assert_eq!(EpochConfig::::get(), Default::default()); }); } + +#[test] +fn on_first_after_genesis_block() 
{ + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let genesis_slot = Slot::from(100); + let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); + + System::initialize(&1, &Default::default(), &digest); + Sassafras::on_initialize(1); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!(RandomnessAccumulator::::get(), [0; 32]); + + Sassafras::on_finalize(1); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("98dc63bd10704f60016011be269a02ec780e9b870222d12457ea7e8a05065028"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn 
on_normal_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let genesis_slot = Slot::from(100); + + let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); + System::initialize(&1, &Default::default(), &digest); + Sassafras::on_initialize(1); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("98dc63bd10704f60016011be269a02ec780e9b870222d12457ea7e8a05065028"), + ); + + Sassafras::on_finalize(1); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("180f852e5a4f4370071072402c395758efdb2a417e99deaed34acc269125ac3e"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], digest.logs[0]); + }) +} + +#[test] +fn on_epoch_change_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let genesis_slot = 
Slot::from(100); + + let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); + System::initialize(&1, &Default::default(), &digest); + Sassafras::on_initialize(1); + + // We want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + let digest = progress_to_block(1 + epoch_duration, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32],); + assert_eq!( + NextRandomness::::get(), + hex!("dae0db238bd08ec36537d924cade5e5ad668e83f4e9a200a1e6aa1102919c999"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("4cfa0840c842f6095155b35bad7f0bf8113c11a12a8ab3e3d116d91b0e8f31f9"), + ); + + Sassafras::on_finalize(1); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), genesis_slot); + assert_eq!(Sassafras::current_slot(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!( + NextRandomness::::get(), + hex!("dae0db238bd08ec36537d924cade5e5ad668e83f4e9a200a1e6aa1102919c999"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("98ed5e9a57afafaea3fddd98555a616f0fefdde27e316ca42cd29de323f90d2a"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = 
sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn enact_epoch_change() { + // TODO-SASS-P2 + // Check NextRandomness and RandomnessAccumulator enactment + // DO this here, in a specific test for readability +} + +#[test] +fn submit_enact_claim_tickets() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let genesis_slot = Slot::from(100); + + let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); + System::initialize(&1, &Default::default(), &digest); + Sassafras::on_initialize(1); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let _digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Check state before tickets submission + assert!(Tickets::::get().is_empty()); + assert!(NextTickets::::get().is_empty()); + + // Submit authoring tickets. 
+ let mut tickets: Vec = make_tickets(genesis_slot + 1, 30, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + + Sassafras::submit_tickets(Origin::none(), tickets.clone()).unwrap(); + + let max_tickets: u32 = ::MaxTickets::get(); + tickets.sort(); + let front = tickets.iter().take(max_tickets as usize / 2); + let back = tickets.iter().rev().take(max_tickets as usize / 2); + let mut expected_tickets = front.chain(back).map(|t| *t).collect::>(); + expected_tickets.sort(); + + // Check state + assert!(Tickets::::get().is_empty()); + let next_tickets = NextTickets::::get().into_iter().collect::>(); + assert_eq!(expected_tickets, next_tickets); + + // Process up to the last epoch slot (do not enact epoch change) + let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); + assert!(Tickets::::get().is_empty()); + let next_tickets = NextTickets::::get().into_iter().collect::>(); + assert_eq!(expected_tickets, next_tickets); + + // Check if we can claim next epoch tickets in outside-in fashion. + // This is to allow native code to eventually fetch the first ticket for a new epoch, + // before the epoch data is effectivelly enacted by the runtime + // (authors tries to claim a ticket before block construction). 
+ // TODO-SASS-P2 BETTER EXPLANATION + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 3).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 4).is_none()); + assert!(Sassafras::slot_ticket(slot + 7).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 10).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 11).is_none()); + + // Enact epoch tickets by progressing one more block + let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); + let curr_tickets = Tickets::::get().into_iter().collect::>(); + assert_eq!(expected_tickets, curr_tickets); + assert!(NextTickets::::get().is_empty()); + + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 3).is_none()); + assert!(Sassafras::slot_ticket(slot + 6).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 7).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 10).is_none()); + }) +} diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 68116c6b91f70..172cbf2d800f7 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -36,10 +36,10 @@ pub struct PreDigest { pub authority_index: 
AuthorityIndex, /// Corresponding slot number. pub slot: Slot, - /// Block VRF output. - pub block_vrf_output: VRFOutput, - /// Block VRF proof. - pub block_vrf_proof: VRFProof, + /// Slot VRF output. + pub vrf_output: VRFOutput, + /// Slot VRF proof. + pub vrf_proof: VRFProof, /// Ticket information. pub ticket_info: Option, } From aeef3ac7d5ee29c80b2384c8cd456212012c431b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 3 Aug 2022 18:37:38 +0200 Subject: [PATCH 03/14] Remove randomness collective flip from sassafras node --- Cargo.lock | 1 - bin/node-sassafras/node/src/cli.rs | 4 +++ bin/node-sassafras/node/src/command.rs | 26 ++++++++++++++++++ bin/node-sassafras/runtime/Cargo.toml | 3 -- bin/node-sassafras/runtime/src/lib.rs | 38 +++++++++++--------------- 5 files changed, 46 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d88ea66c7653f..579f7b815984e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4935,7 +4935,6 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-grandpa", - "pallet-randomness-collective-flip", "pallet-sassafras", "pallet-sudo", "pallet-timestamp", diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs index bb2ffa1938107..4ab4d34210c98 100644 --- a/bin/node-sassafras/node/src/cli.rs +++ b/bin/node-sassafras/node/src/cli.rs @@ -36,6 +36,10 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), + /// Sub-commands concerned with benchmarking. + #[clap(subcommand)] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// Try some command against runtime state. 
#[cfg(feature = "try-runtime")] TryRuntime(try_runtime_cli::TryRuntimeCmd), diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index cf17c37968f54..74ac7dc809802 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -3,6 +3,7 @@ use crate::{ cli::{Cli, Subcommand}, service, }; +use frame_benchmarking_cli::BenchmarkCmd; use node_sassafras_runtime::Block; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -102,6 +103,31 @@ pub fn run() -> sc_cli::Result<()> { Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) }) }, + Some(Subcommand::Benchmark(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + // This switch needs to be in the client, since the client decides + // which sub-commands it wants to support. + match cmd { + BenchmarkCmd::Pallet(cmd) => { + if !cfg!(feature = "runtime-benchmarks") { + return Err( + "Runtime benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ .into(), + ) + } + + cmd.run::(config) + }, + _ => { + println!("Not implemented..."); + Ok(()) + }, + } + }) + }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 233d9e0e14bbb..278e290406c00 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -19,7 +19,6 @@ pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../ pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } @@ -62,7 +61,6 @@ std = [ "pallet-sassafras/std", "pallet-balances/std", "pallet-grandpa/std", - "pallet-randomness-collective-flip/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -96,7 +94,6 @@ try-runtime = [ "frame-system/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", - "pallet-randomness-collective-flip/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 7806e0652c649..c61bab9e5abc0 100644 --- a/bin/node-sassafras/runtime/src/lib.rs 
+++ b/bin/node-sassafras/runtime/src/lib.rs @@ -6,41 +6,32 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use pallet_grandpa::{ - fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, -}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + ApplyExtrinsicResult, MultiSignature, Perbill, }; use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -// A few exports that help ease life for downstream crates. -pub use frame_support::{ +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use frame_support::{ construct_runtime, parameter_types, - traits::{ - ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, - }, + traits::{ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem}, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - IdentityFee, Weight, + constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + IdentityFee, }, - StorageValue, }; -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::CurrencyAdapter; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; /// An index to a block. 
pub type BlockNumber = u32; @@ -222,8 +213,6 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl pallet_randomness_collective_flip::Config for Runtime {} - parameter_types! { pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -296,7 +285,6 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system, - RandomnessCollectiveFlip: pallet_randomness_collective_flip, Timestamp: pallet_timestamp, Sassafras: pallet_sassafras, Grandpa: pallet_grandpa, @@ -308,10 +296,13 @@ construct_runtime!( /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; + /// Block header type as expected by this runtime. pub type Header = generic::Header; + /// Block type as expected by this runtime. pub type Block = generic::Block; + /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckNonZeroSender, @@ -323,10 +314,13 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; + /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, From 4c6dce85f116220539914a77550426877691c5dd Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 4 Aug 2022 16:53:12 +0200 Subject: [PATCH 04/14] Refactory and epoch skip support draft --- bin/node-sassafras/runtime/Cargo.toml | 1 + bin/node-sassafras/runtime/src/lib.rs | 2 + frame/sassafras/src/benchmarking.rs | 54 ++++++++ frame/sassafras/src/lib.rs | 156 +++++++++++++--------- frame/sassafras/src/mock.rs | 78 +++++------ frame/sassafras/src/tests.rs | 181 ++++++++++++++++++-------- 6 files changed, 310 insertions(+), 162 deletions(-) create mode 100644 frame/sassafras/src/benchmarking.rs diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 278e290406c00..3bcd35d8b020c 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -85,6 +85,7 @@ runtime-benchmarks = [ "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c61bab9e5abc0..307c82115cfae 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -341,6 +341,8 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] + [pallet_grandpa, Grandpa] + [pallet_sassafras, Sassafras] ); } diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs new file mode 100644 index 0000000000000..2f1818e5b52cd --- /dev/null +++ b/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. + +use super::*; +use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_io::hashing; + +fn make_dummy_ticket(i: usize) -> Ticket { + let buf = i.to_le_bytes(); + hashing::twox_256(&buf).try_into().unwrap() +} + +benchmarks! { + submit_tickets { + let x in 0 .. 100; + + // Almost fill the available tickets space. + + let max_tickets: u32 = ::MaxTickets::get() - 10; + let tickets: Vec = (0..max_tickets as usize).into_iter().map(|i| { + make_dummy_ticket(i) + }).collect(); + let _ = Pallet::::submit_tickets(RawOrigin::None.into(), tickets); + + // Create the tickets to submit during the benchmark + + let tickets: Vec = (0..x as usize).into_iter().map(|i| { + make_dummy_ticket(i + max_tickets as usize) + }).collect(); + }: _(RawOrigin::None, tickets) + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index cd2deb0d88f8c..58168ef8e2ae7 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -67,13 +67,12 @@ pub use sp_consensus_sassafras::{ PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, }; -// TODO-SASS-P2: tests and benches -#[cfg(test)] +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] mod mock; 
-#[cfg(test)] +#[cfg(all(feature = "std", test))] mod tests; -//#[cfg(feature = "runtime-benchmarks")] -//mod benchmarking; pub use pallet::*; @@ -286,11 +285,8 @@ pub mod pallet { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); - // We have to traverse the tickets list one by one to verify the SNARK proofs. let mut next_tickets = NextTickets::::get(); - // 1. validate proof - // 2. append to sorted list // TODO-SASS-P2: use a scattered structure for tickets next_tickets = next_tickets.try_mutate(|tree| { for ticket in tickets.iter() { @@ -338,6 +334,10 @@ pub mod pallet { // submit our tickets if we don't have enough authoring slots. // If we have 0 slots => we have zero chances. // Maybe this is one valid reason to introduce proxies. + // In short the question is >>> WHO HAS THE RIGHT TO SUBMIT A TICKET? <<< + // A) The current epoch validators + // B) The next epoch validators + // C) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) log::warn!( target: "sassafras::runtime", "🌳 Rejecting unsigned transaction from external sources.", @@ -355,6 +355,7 @@ pub mod pallet { } // TODO-SASS-P2 more validation steps: + // 0. validate the proof // 1. epoch index // 2. signed by an authority for current epoch // 3. single submission attempt from validator? @@ -410,9 +411,9 @@ impl Pallet { } fn slot_epoch_index(slot: Slot) -> u64 { - if *GenesisSlot::::get() == 0 { - return 0 - } + // if *GenesisSlot::::get() == 0 { + // return 0 + // } *slot.saturating_sub(Self::current_epoch_start()) } @@ -421,6 +422,11 @@ impl Pallet { /// /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. + /// + /// TODO-SASS-P3: + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. + /// Should the tickets be invalidated? Currently they are... see the `get-ticket` method. 
pub fn enact_epoch_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< @@ -428,37 +434,40 @@ impl Pallet { T::MaxAuthorities, >, ) { - // TODO-SASS-P2: we don't depend on session module... - - // PRECONDITION: caller has done initialization and is guaranteed by the session module to - // be called before this. + // PRECONDITION: caller has done initialization. + // If using the internal trigger or the session pallet then this is guaranteed. debug_assert!(Self::initialized().is_some()); + // Update authorities + Authorities::::put(authorities); + NextAuthorities::::put(&next_authorities); + // Update epoch index - let epoch_index = EpochIndex::::get() + // TODO-SASS-P2: fix this to allow epoch skip + let mut epoch_index = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - EpochIndex::::put(epoch_index); - // Update authorities - Authorities::::put(authorities); - NextAuthorities::::put(&next_authorities); + // TODO-SASS-P2: Test this, we also have to properly set the epoch index + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_index)); + if slot_idx >= T::EpochDuration::get() { + // Detected one or more skipped epochs, kill tickets and recompute the `epoch_index`. + NextTickets::::kill(); + // TODO-SASS-P2: adjust epoch index (TEST ME) + let idx: u64 = slot_idx.into(); + epoch_index += idx / T::EpochDuration::get(); + } + EpochIndex::::put(epoch_index); - // Update epoch randomness. let next_epoch_index = epoch_index .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - // Returns randomness for the current epoch and computes the *next* - // epoch randomness. - let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::::put(randomness); + // Updates current epoch randomness and computes the *next* epoch randomness. 
+ let next_randomness = Self::update_randomness(next_epoch_index); // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. - - let next_randomness = NextRandomness::::get(); - let next_epoch = NextEpochDescriptor { authorities: next_authorities.to_vec(), randomness: next_randomness, @@ -476,28 +485,41 @@ impl Pallet { // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); // } - Self::enact_tickets(); + Self::update_tickets(); } - /// Enact next epoch tickets list. - /// To work properly this should be done as the last action of the last epoch slot. - /// (i.e. current tickets list is not used at this point) - fn enact_tickets() { - // TODO-SASS-P2: manage skipped epoch by killing both Tickets and NextTickets - - let mut tickets = NextTickets::::get().into_iter().collect::>(); + /// Call this fuction on epoch change to update tickets. + /// Enact next epoch tickets. + fn update_tickets() { + let mut tickets = NextTickets::::take().into_iter().collect::>(); log::debug!(target: "sassafras", "🌳 @@@@@@@@@ Enacting {} tickets", tickets.len()); if tickets.len() > T::MaxTickets::get() as usize { log::error!(target: "sassafras", "🌳 should never happen..."); - let max = T::MaxTickets::get() as usize; - tickets.truncate(max); + tickets.truncate(T::MaxTickets::get() as usize); } + let tickets = BoundedVec::::try_from(tickets) .expect("vector has been eventually truncated; qed"); - Tickets::::put(tickets); - NextTickets::::kill(); + } + + /// Call this function on epoch change to update the randomness. + /// Returns the next epoch randomness. 
+ fn update_randomness(next_epoch_index: u64) -> schnorrkel::Randomness { + let curr_randomness = NextRandomness::::get(); + Randomness::::put(curr_randomness); + + let accumulator = RandomnessAccumulator::::get(); + let mut s = Vec::with_capacity(2 * curr_randomness.len() + 8); + s.extend_from_slice(&curr_randomness); + s.extend_from_slice(&next_epoch_index.to_le_bytes()); + s.extend_from_slice(&accumulator); + + let next_randomness = sp_io::hashing::blake2_256(&s); + NextRandomness::::put(&next_randomness); + + next_randomness } /// Finds the start slot of the current epoch. Only guaranteed to give correct results after @@ -531,14 +553,13 @@ impl Pallet { // TODO-SASS-P2: temporary fix to make the compiler happy #[allow(dead_code)] fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { - //if !authorities.is_empty() { + assert!(!authorities.is_empty()); assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) .expect("Initial number of authorities should be lower than T::MaxAuthorities"); Authorities::::put(&bounded_authorities); NextAuthorities::::put(&bounded_authorities); - //} } fn initialize_genesis_epoch(genesis_slot: Slot) { @@ -590,33 +611,34 @@ impl Pallet { T::EpochChangeTrigger::trigger::(now); } - /// Call this function exactly once when an epoch changes, to update the randomness. - /// Returns the new randomness. 
- fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::::get(); - let accumulator = RandomnessAccumulator::::get(); - - let mut s = Vec::with_capacity(2 * this_randomness.len() + 8); - s.extend_from_slice(&this_randomness); - s.extend_from_slice(&next_epoch_index.to_le_bytes()); - s.extend_from_slice(&accumulator); - - let next_randomness = sp_io::hashing::blake2_256(&s); - NextRandomness::::put(&next_randomness); - - this_randomness - } - - /// Fetch expected ticket for the given slot. + /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ , n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the `Tickets` + /// list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the `NextTickets` + /// list. Note that in this case we may have not finished receiving all the tickets for that + /// epoch yet. The next epoch tickets should be considered "stable" only after the current + /// epoch first half (see the [`submit_tickers_unsigned_extrinsic`]). + /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. // TODO-SASS-P2: This is a very inefficient and temporary solution. // On refactory we will come up with a better solution (like a scattered vector). 
pub fn slot_ticket(slot: Slot) -> Option { let duration = T::EpochDuration::get(); let slot_idx = Self::slot_epoch_index(slot); // % duration; - // Given a list of ordered tickets: t0, t1, t2, ..., tk to be assigned to N slots (N>k) - // The tickets are assigned to the slots in the following order: t1, t3, ..., t4, t2, t0. - let ticket_index = |slot_idx| { let ticket_idx = if slot_idx < duration / 2 { 2 * slot_idx + 1 @@ -635,7 +657,7 @@ impl Pallet { } else if slot_idx < 2 * duration { // Get a ticket for the next epoch. Since its state values were not enacted yet, we // have to fetch it from the `NextTickets` list. This may happen when an author request - // the first ticket of a new epoch. + // the first ticket for an epoch. let tickets = NextTickets::::get(); let idx = ticket_index(slot_idx - duration); tickets.iter().nth(idx).cloned() @@ -646,6 +668,12 @@ impl Pallet { } /// Submit next epoch validator tickets via an unsigned extrinsic. + /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has + /// is called within the first half of the epoch. That is, tickets received within the + /// second half are dropped. + // TODO-SASS-P2: + // 1. we have to add the epoch and slot index to the call parameters. + // 2. maybe we have to drop tickets SUBMITTED after the first half. pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); let call = Call::submit_tickets { tickets }; diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 4ad994184174b..13be7cf1d5034 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Test utilities for Sassafras consensus. +//! Test utilities for Sassafras pallet. 
// TODO-SASS-P2 remove #![allow(unused_imports)] @@ -29,7 +29,9 @@ use frame_support::{ }, }; use scale_codec::Encode; -use sp_consensus_sassafras::{AuthorityId, AuthorityPair, Slot}; +use sp_consensus_sassafras::{ + digests::PreDigest, AuthorityId, AuthorityIndex, AuthorityPair, Slot, +}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::{ crypto::{IsWrappedBy, KeyTypeId, Pair}, @@ -141,22 +143,20 @@ pub fn new_test_ext_with_pairs( (pairs, t.into()) } -fn make_ticket_vrf( - slot: Slot, - attempt: u64, - pair: &sp_consensus_sassafras::AuthorityPair, -) -> (VRFOutput, VRFProof) { +fn make_ticket_vrf(slot: Slot, attempt: u64, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization let epoch_start = Sassafras::current_epoch_start(); - // Check if epoch index is going to change on initialization if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { - epoch += 1; + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); } - let transcript = - sp_consensus_sassafras::make_ticket_transcript(&Sassafras::randomness(), attempt, epoch); + let transcript = sp_consensus_sassafras::make_ticket_transcript(&randomness, attempt, epoch); let inout = pair.vrf_sign(transcript); let output = VRFOutput(inout.0.to_output()); let proof = VRFProof(inout.1); @@ -164,32 +164,27 @@ fn make_ticket_vrf( (output, proof) } -pub fn make_tickets( - slot: Slot, - attempts: u64, - pair: &sp_consensus_sassafras::AuthorityPair, -) -> Vec<(VRFOutput, VRFProof)> { +pub fn make_tickets(slot: Slot, attempts: u64, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { (0..attempts) .into_iter() .map(|attempt| make_ticket_vrf(slot, attempt, pair)) .collect() } -fn make_slot_vrf( - slot: Slot, - pair: 
&sp_consensus_sassafras::AuthorityPair, -) -> (VRFOutput, VRFProof) { +fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization let epoch_start = Sassafras::current_epoch_start(); - // Check if epoch index is going to change on initialization if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { - epoch += 1; + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); } - let transcript = - sp_consensus_sassafras::make_slot_transcript(&Sassafras::randomness(), slot, epoch); + let transcript = sp_consensus_sassafras::make_slot_transcript(&randomness, slot, epoch); let inout = pair.vrf_sign(transcript); let output = VRFOutput(inout.0.to_output()); let proof = VRFProof(inout.1); @@ -198,24 +193,18 @@ fn make_slot_vrf( } pub fn make_pre_digest( - authority_index: sp_consensus_sassafras::AuthorityIndex, - slot: sp_consensus_sassafras::Slot, - pair: &sp_consensus_sassafras::AuthorityPair, -) -> sp_consensus_sassafras::digests::PreDigest { + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> PreDigest { let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); - sp_consensus_sassafras::digests::PreDigest { - authority_index, - slot, - vrf_output, - vrf_proof, - ticket_info: None, - } + PreDigest { authority_index, slot, vrf_output, vrf_proof, ticket_info: None } } pub fn make_wrapped_pre_digest( - authority_index: sp_consensus_sassafras::AuthorityIndex, - slot: sp_consensus_sassafras::Slot, - pair: &sp_consensus_sassafras::AuthorityPair, + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, ) -> Digest { let pre_digest = make_pre_digest(authority_index, slot, pair); let log = @@ -223,11 +212,11 @@ pub fn make_wrapped_pre_digest( 
Digest { logs: vec![log] } } -pub fn go_to_block(number: u64, slot: u64, pair: &sp_consensus_sassafras::AuthorityPair) -> Digest { +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { Sassafras::on_finalize(System::block_number()); let parent_hash = System::finalize().hash(); - let digest = make_wrapped_pre_digest(0, slot.into(), pair); + let digest = make_wrapped_pre_digest(0, slot, pair); System::reset_events(); System::initialize(&number, &parent_hash, &digest); @@ -237,16 +226,13 @@ pub fn go_to_block(number: u64, slot: u64, pair: &sp_consensus_sassafras::Author } /// Slots will grow accordingly to blocks -pub fn progress_to_block( - number: u64, - pair: &sp_consensus_sassafras::AuthorityPair, -) -> Option { - let mut slot = u64::from(Sassafras::current_slot()) + 1; +pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; let mut digest = None; for i in System::block_number() + 1..=number { let dig = go_to_block(i, slot, pair); digest = Some(dig); - slot += 1; + slot = slot + 1; } digest } diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index cf20eea03a272..27dcdf66917cc 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for Sassafras consensus. +//! Tests for Sassafras pallet. 
// TODO-SASS-P2 remove #![allow(unused_imports)] @@ -25,14 +25,50 @@ use mock::*; use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{Currency, EstimateNextSessionRotation, OnFinalize, OnInitialize}, + dispatch::EncodeLike, + traits::{ConstU32, Currency, EstimateNextSessionRotation, OnFinalize, OnInitialize}, weights::{GetDispatchInfo, Pays}, + BoundedBTreeSet, }; use hex_literal::hex; use pallet_session::ShouldEndSession; use sp_consensus_sassafras::{SassafrasEpochConfiguration, Slot}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::crypto::Pair; +use sp_runtime::traits::Get; +use std::collections::BTreeSet; + +#[test] +fn slot_ticket_fetch() { + let max_tickets: u32 = ::MaxTickets::get(); + + let tickets: Vec = (0..max_tickets as u8) + .into_iter() + .map(|i| [i; 32].try_into().unwrap()) + .collect(); + let tickets = + BoundedVec::<_, _>::try_from(tickets).expect("vector has been eventually truncated; qed"); + + new_test_ext(4).execute_with(|| { + Tickets::::put(tickets.clone()); + + assert_eq!(Sassafras::slot_ticket(0.into()), Some(tickets[1])); + assert_eq!(Sassafras::slot_ticket(1.into()), Some(tickets[3])); + assert_eq!(Sassafras::slot_ticket(2.into()), Some(tickets[5])); + assert_eq!(Sassafras::slot_ticket(3.into()), None); + assert_eq!(Sassafras::slot_ticket(4.into()), None); + assert_eq!(Sassafras::slot_ticket(5.into()), None); + assert_eq!(Sassafras::slot_ticket(6.into()), None); + assert_eq!(Sassafras::slot_ticket(7.into()), Some(tickets[4])); + assert_eq!(Sassafras::slot_ticket(8.into()), Some(tickets[2])); + assert_eq!(Sassafras::slot_ticket(9.into()), Some(tickets[0])); + + // TODO-SASS-P2: test next epoch tickets fetch + assert_eq!(Sassafras::slot_ticket(10.into()), None); + + assert_eq!(Sassafras::slot_ticket(42.into()), None); + }); +} #[test] fn genesis_values() { @@ -47,19 +83,20 @@ fn on_first_after_genesis_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot 
= Slot::from(100); - let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); + let start_slot = Slot::from(100); + let start_block = 1; - System::initialize(&1, &Default::default(), &digest); - Sassafras::on_initialize(1); + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); // Post-initialization status assert!(Initialized::::get().is_some()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_epoch_start(), start_slot); assert_eq!(Sassafras::current_slot_epoch_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); @@ -71,10 +108,10 @@ fn on_first_after_genesis_block() { // Post-finalization status assert!(Initialized::::get().is_none()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_epoch_start(), start_slot); assert_eq!(Sassafras::current_slot_epoch_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); @@ -105,11 +142,12 @@ fn on_normal_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot = Slot::from(100); + let start_slot = Slot::from(100); + let start_block = 1; - let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); - System::initialize(&1, &Default::default(), &digest); - 
Sassafras::on_initialize(1); + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); // We don't want to trigger an epoch change in this test. let epoch_duration: u64 = ::EpochDuration::get(); @@ -119,10 +157,10 @@ fn on_normal_block() { // Post-initialization status assert!(Initialized::::get().is_some()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot + 1); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_epoch_start(), start_slot); assert_eq!(Sassafras::current_slot_epoch_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); @@ -131,16 +169,16 @@ fn on_normal_block() { hex!("98dc63bd10704f60016011be269a02ec780e9b870222d12457ea7e8a05065028"), ); - Sassafras::on_finalize(1); + Sassafras::on_finalize(2); let header = System::finalize(); // Post-finalization status assert!(Initialized::::get().is_none()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot + 1); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), genesis_slot); + assert_eq!(Sassafras::current_epoch_start(), start_slot); assert_eq!(Sassafras::current_slot_epoch_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); @@ -153,31 +191,32 @@ fn on_normal_block() { assert_eq!(header.digest.logs.len(), 1); assert_eq!(header.digest.logs[0], digest.logs[0]); - }) + }); } #[test] -fn on_epoch_change_block() { +fn epoch_change_block() { let 
(pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot = Slot::from(100); + let start_slot = Slot::from(100); + let start_block = 1; - let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); - System::initialize(&1, &Default::default(), &digest); - Sassafras::on_initialize(1); + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); // We want to trigger an epoch change in this test. let epoch_duration: u64 = ::EpochDuration::get(); - let digest = progress_to_block(1 + epoch_duration, &pairs[0]).unwrap(); + let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); // Post-initialization status assert!(Initialized::::get().is_some()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); assert_eq!(Sassafras::epoch_index(), 1); - assert_eq!(Sassafras::current_epoch_start(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); assert_eq!(Sassafras::current_slot_epoch_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32],); assert_eq!( @@ -189,16 +228,16 @@ fn on_epoch_change_block() { hex!("4cfa0840c842f6095155b35bad7f0bf8113c11a12a8ab3e3d116d91b0e8f31f9"), ); - Sassafras::on_finalize(1); + Sassafras::on_finalize(start_block + epoch_duration); let header = System::finalize(); // Post-finalization status assert!(Initialized::::get().is_none()); - assert_eq!(Sassafras::genesis_slot(), genesis_slot); - assert_eq!(Sassafras::current_slot(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); assert_eq!(Sassafras::epoch_index(), 1); - 
assert_eq!(Sassafras::current_epoch_start(), genesis_slot + epoch_duration); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); assert_eq!(Sassafras::current_slot_epoch_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( @@ -226,23 +265,17 @@ fn on_epoch_change_block() { }) } -#[test] -fn enact_epoch_change() { - // TODO-SASS-P2 - // Check NextRandomness and RandomnessAccumulator enactment - // DO this here, in a specific test for readability -} - #[test] fn submit_enact_claim_tickets() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot = Slot::from(100); + let start_slot = Slot::from(100); + let start_block = 1; - let digest = make_wrapped_pre_digest(0, genesis_slot, &pairs[0]); - System::initialize(&1, &Default::default(), &digest); - Sassafras::on_initialize(1); + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); // We don't want to trigger an epoch change in this test. let epoch_duration: u64 = ::EpochDuration::get(); @@ -254,7 +287,7 @@ fn submit_enact_claim_tickets() { assert!(NextTickets::::get().is_empty()); // Submit authoring tickets. - let mut tickets: Vec = make_tickets(genesis_slot + 1, 30, &pairs[0]) + let mut tickets: Vec = make_tickets(start_slot + 1, 30, &pairs[0]) .into_iter() .map(|(output, _)| output) .collect(); @@ -280,10 +313,10 @@ fn submit_enact_claim_tickets() { assert_eq!(expected_tickets, next_tickets); // Check if we can claim next epoch tickets in outside-in fashion. + // // This is to allow native code to eventually fetch the first ticket for a new epoch, // before the epoch data is effectivelly enacted by the runtime - // (authors tries to claim a ticket before block construction). - // TODO-SASS-P2 BETTER EXPLANATION + // (block authors tries to claim a ticket before block construction). 
let slot = Sassafras::current_slot(); assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); @@ -296,6 +329,7 @@ fn submit_enact_claim_tickets() { assert!(Sassafras::slot_ticket(slot + 11).is_none()); // Enact epoch tickets by progressing one more block + let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); let curr_tickets = Tickets::::get().into_iter().collect::>(); assert_eq!(expected_tickets, curr_tickets); @@ -311,5 +345,48 @@ fn submit_enact_claim_tickets() { assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[2]); assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[0]); assert!(Sassafras::slot_ticket(slot + 10).is_none()); - }) + }); +} + +#[test] +fn block_skips_epochs() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + let tickets: Vec = make_tickets(start_slot + 1, 30, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + Sassafras::submit_tickets(Origin::none(), tickets.clone()).unwrap(); + + assert!(Tickets::::get().is_empty()); + assert!(!NextTickets::::get().is_empty()); + let next_random = NextRandomness::::get(); + + // We want to trigger an skip epoch in this test. 
+ let epoch_duration: u64 = ::EpochDuration::get(); + let offset = 3 * epoch_duration; + let _digest = go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + offset); + assert_eq!(Sassafras::epoch_index(), 3); + assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + // Tickets were discarded + assert!(Tickets::::get().is_empty()); + assert!(NextTickets::::get().is_empty()); + // We've used the last known next epoch randomness as a fallback + assert_eq!(next_random, Sassafras::randomness()); + }); } From b38c437abc501e9f2150c47e57a2e071f9026f76 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 5 Aug 2022 10:35:04 +0200 Subject: [PATCH 05/14] Small improvement --- client/consensus/sassafras/src/authorship.rs | 23 +++++++++----------- frame/sassafras/src/lib.rs | 19 +++++----------- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 34f10513b2258..5063751a2bc1c 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -70,19 +70,16 @@ pub fn claim_slot( transcript_data, ); - match result { - Ok(Some(signature)) => { - let pre_digest = PreDigest { - authority_index: authority_index as u32, - slot, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof.clone()), - ticket_info, - }; - Some((pre_digest, authority_id.clone())) - }, - _ => None, - } + result.ok().flatten().map(|signature| { + let pre_digest = PreDigest { + authority_index: authority_index as u32, + slot, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof.clone()), + ticket_info, + }; + (pre_digest, authority_id.clone()) + 
}) } /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 58168ef8e2ae7..1741db48a063a 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -287,7 +287,7 @@ pub mod pallet { let mut next_tickets = NextTickets::::get(); - // TODO-SASS-P2: use a scattered structure for tickets + // TODO-SASS-P2: temporary code next_tickets = next_tickets.try_mutate(|tree| { for ticket in tickets.iter() { tree.insert(*ticket); @@ -295,7 +295,7 @@ pub mod pallet { let max_tickets = T::MaxTickets::get() as usize; if tree.len() > max_tickets { // Remove the mid values - // TODO-SASS-P2: with the new structure this will be reimplemented... + // TODO-SASS-P2: don't judge me, this will be reimplemented :-) let diff = tree.len() - max_tickets; let off = max_tickets / 2; let val = tree.iter().nth(off).cloned().unwrap(); @@ -354,12 +354,6 @@ pub mod pallet { return InvalidTransaction::Stale.into() } - // TODO-SASS-P2 more validation steps: - // 0. validate the proof - // 1. epoch index - // 2. signed by an authority for current epoch - // 3. single submission attempt from validator? - ValidTransaction::with_tag_prefix("Sassafras") // We assign the maximum priority for any equivocation report. 
.priority(TransactionPriority::max_value()) @@ -411,9 +405,9 @@ impl Pallet { } fn slot_epoch_index(slot: Slot) -> u64 { - // if *GenesisSlot::::get() == 0 { - // return 0 - // } + if *GenesisSlot::::get() == 0 { + return 0 + } *slot.saturating_sub(Self::current_epoch_start()) } @@ -443,7 +437,6 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update epoch index - // TODO-SASS-P2: fix this to allow epoch skip let mut epoch_index = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); @@ -628,7 +621,7 @@ impl Pallet { /// If `slot` value falls within the next epoch then we fetch tickets from the `NextTickets` /// list. Note that in this case we may have not finished receiving all the tickets for that /// epoch yet. The next epoch tickets should be considered "stable" only after the current - /// epoch first half (see the [`submit_tickers_unsigned_extrinsic`]). + /// epoch first half (see the [`submit_tickets_unsigned_extrinsic`]). 
/// /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), From 570084f8a3a0ae7c131078e0fcf13f72e54826b7 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 8 Aug 2022 17:42:46 +0200 Subject: [PATCH 06/14] Code cleanup --- client/consensus/sassafras/src/authorship.rs | 4 +- client/consensus/sassafras/src/lib.rs | 43 +++++++++----------- frame/sassafras/src/lib.rs | 11 ++--- 3 files changed, 27 insertions(+), 31 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 5063751a2bc1c..77092c7340418 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -141,7 +141,7 @@ pub fn generate_epoch_tickets( Err(_) => continue, }; - let get_ticket = |attempt| { + let make_ticket = |attempt| { let transcript_data = make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); @@ -172,7 +172,7 @@ pub fn generate_epoch_tickets( }; for attempt in 0..max_attempts { - if let Some((ticket, ticket_info)) = get_ticket(attempt) { + if let Some((ticket, ticket_info)) = make_ticket(attempt) { tickets.push(ticket); epoch.tickets_info.insert(ticket, ticket_info); } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 083a713034372..4d05d7b0f9e8e 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -401,14 +401,14 @@ where can_author_with, ); - let ticket_worker = tickets_worker( + let tickets_worker = tickets_worker( client.clone(), keystore, sassafras_link.epoch_changes.clone(), select_chain, ); - let inner = future::select(Box::pin(slot_worker), Box::pin(ticket_worker)); + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), 
slot_notification_sinks }) } @@ -437,28 +437,19 @@ async fn tickets_worker( debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); - let tickets = { - let mut epoch_changes = epoch_changes.shared_data(); - - let number = *notification.header.number(); - let position = if number == One::one() { - EpochIdentifierPosition::Genesis1 - } else { - EpochIdentifierPosition::Regular - }; - let mut epoch_identifier = - EpochIdentifier { position, hash: notification.hash, number }; - - let epoch = match epoch_changes.epoch_mut(&mut epoch_identifier) { - Some(epoch) => epoch, - None => { - warn!(target: "sassafras", "🌳 Unexpected missing epoch data for {}", notification.hash); - continue - }, - }; - - authorship::generate_epoch_tickets(epoch, 30, 1, &keystore) + let number = *notification.header.number(); + let position = if number == One::one() { + EpochIdentifierPosition::Genesis1 + } else { + EpochIdentifierPosition::Regular }; + let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; + + let tickets = epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| authorship::generate_epoch_tickets(epoch, 30, 1, &keystore)) + .unwrap_or_default(); if tickets.is_empty() { continue @@ -480,7 +471,11 @@ async fn tickets_worker( }; if let Some(err) = err { error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); - // TODO-SASS-P2: on error remove tickets from epoch... + // Remove tickets from epoch tree node. + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| epoch.tickets_info.clear()); } } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 1741db48a063a..4c4afb90fc604 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -589,6 +589,7 @@ impl Pallet { .next(); let pre_digest = pre_digest.expect("Valid Sassafras block should have a pre-digest. 
qed"); // let Some(ref pre_digest) = pre_digest { + // let current_slot = pre_digest.slot; CurrentSlot::::put(current_slot); @@ -632,7 +633,7 @@ impl Pallet { let duration = T::EpochDuration::get(); let slot_idx = Self::slot_epoch_index(slot); // % duration; - let ticket_index = |slot_idx| { + let get_ticket_idx = |slot_idx| { let ticket_idx = if slot_idx < duration / 2 { 2 * slot_idx + 1 } else { @@ -645,15 +646,15 @@ impl Pallet { if slot_idx < duration { // Get a ticket for the current epoch. let tickets = Tickets::::get(); - let idx = ticket_index(slot_idx); - tickets.get(idx).cloned() + let ticket_idx = get_ticket_idx(slot_idx); + tickets.get(ticket_idx).cloned() } else if slot_idx < 2 * duration { // Get a ticket for the next epoch. Since its state values were not enacted yet, we // have to fetch it from the `NextTickets` list. This may happen when an author request // the first ticket for an epoch. let tickets = NextTickets::::get(); - let idx = ticket_index(slot_idx - duration); - tickets.iter().nth(idx).cloned() + let ticket_idx = get_ticket_idx(slot_idx - duration); + tickets.iter().nth(ticket_idx).cloned() } else { // We have no tickets for the requested slot yet. 
None From 0d2c9af6a7281d22d235274424b173ec01928004 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 8 Aug 2022 22:34:53 +0200 Subject: [PATCH 07/14] Simplify the overall VRF output management --- client/consensus/sassafras/src/authorship.rs | 54 +++--------- client/consensus/sassafras/src/lib.rs | 4 +- .../consensus/sassafras/src/verification.rs | 2 +- frame/sassafras/src/lib.rs | 36 ++++++-- primitives/consensus/sassafras/src/lib.rs | 85 +++++++++++++------ 5 files changed, 105 insertions(+), 76 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 77092c7340418..1542977a0a60d 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -24,11 +24,11 @@ use scale_codec::Encode; use sp_application_crypto::AppKey; use sp_consensus_sassafras::{ digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, - Ticket, TicketInfo, SASSAFRAS_TICKET_VRF_PREFIX, + Ticket, TicketInfo, }; -use sp_consensus_vrf::schnorrkel::{PublicKey, VRFInOut, VRFOutput, VRFProof}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::{twox_64, ByteArray}; -use sp_keystore::{vrf::make_transcript, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// Get secondary authority index for the given epoch and slot. #[inline] @@ -82,34 +82,6 @@ pub fn claim_slot( }) } -/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: -/// - x: redundancy factor; -/// - s: number of slots in epoch; -/// - a: max number of attempts; -/// - v: number of validator in epoch. -/// The parameters should be chosen such that T <= 1. -/// If `attempts * validators` is zero then we fallback to T = 0 -// TODO-SASS-P3: this formula must be double-checked... 
-#[inline] -fn calculate_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> u128 { - let den = attempts as u128 * validators as u128; - let num = redundancy as u128 * slots as u128; - let res = u128::MAX.checked_div(den).unwrap_or(0).saturating_mul(num); - - // TODO-SASS-P4 remove me - log::debug!( - target: "sassafras", - "🌳 Tickets threshold: {} {:016x}", num as f64 / den as f64, res, - ); - res -} - -/// Returns true if the given VRF output is lower than the given threshold, false otherwise. -#[inline] -pub fn check_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(SASSAFRAS_TICKET_VRF_PREFIX)) < threshold -} - /// Generate the tickets for the given epoch. /// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` /// structure. The additional information will be used during epoch to claim slots. @@ -121,26 +93,22 @@ pub fn generate_epoch_tickets( ) -> Vec { let mut tickets = vec![]; - let threshold = calculate_threshold( + let threshold = sp_consensus_sassafras::compute_threshold( redundancy_factor, epoch.duration as u32, max_attempts, epoch.authorities.len() as u32, ); + // TODO-SASS-P4 remove me + log::debug!(target: "sassafras", "🌳 Tickets threshold: {:032x}", threshold); let authorities = epoch.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); for (authority_index, authority_id) in authorities { - let raw_key = authority_id.to_raw_vec(); - - if !SyncCryptoStore::has_keys(&**keystore, &[(raw_key.clone(), AuthorityId::ID)]) { + if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) + { continue } - let public = match PublicKey::from_bytes(&raw_key) { - Ok(public) => public, - Err(_) => continue, - }; - let make_ticket = |attempt| { let transcript_data = make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); @@ -155,13 +123,11 @@ pub fn generate_epoch_tickets( ) .ok()??; - 
let transcript = make_transcript(transcript_data); - let inout = signature.output.attach_input_hash(&public, transcript).ok()?; - if !check_threshold(&inout, threshold) { + let ticket = VRFOutput(signature.output); + if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { return None } - let ticket = VRFOutput(signature.output); let ticket_info = TicketInfo { attempt: attempt as u32, authority_index: authority_index as u32, diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4d05d7b0f9e8e..d6abbbb2ed531 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -445,10 +445,12 @@ async fn tickets_worker( }; let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; + let attempts = sp_consensus_sassafras::TICKET_MAX_ATTEMPTS; + let redundancy = sp_consensus_sassafras::TICKET_REDUNDANCY_FACTOR; let tickets = epoch_changes .shared_data() .epoch_mut(&epoch_identifier) - .map(|epoch| authorship::generate_epoch_tickets(epoch, 30, 1, &keystore)) + .map(|epoch| authorship::generate_epoch_tickets(epoch, attempts, redundancy, &keystore)) .unwrap_or_default(); if tickets.is_empty() { diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index f65ea1fb90d0d..a58caf374ae68 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -128,7 +128,7 @@ pub fn check_header( }, } - // Check block-vrf proof + // Check slot-vrf proof let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 4c4afb90fc604..290274153a720 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -280,7 +280,10 @@ pub mod pallet { impl Pallet { /// Submit next epoch tickets. 
#[pallet::weight(10_000)] - pub fn submit_tickets(origin: OriginFor, tickets: Vec) -> DispatchResult { + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec, + ) -> DispatchResult { ensure_none(origin)?; log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); @@ -346,7 +349,9 @@ pub mod pallet { } // Current slot should be less than half of epoch duration. - if Self::current_slot_epoch_index() >= T::EpochDuration::get() / 2 { + let epoch_duration = T::EpochDuration::get(); + + if Self::current_slot_epoch_index() >= epoch_duration / 2 { log::warn!( target: "sassafras::runtime", "🌳 Timeout to propose tickets, bailing out.", @@ -354,6 +359,25 @@ pub mod pallet { return InvalidTransaction::Stale.into() } + // Check tickets are below threshold + + let next_auth = NextAuthorities::::get(); + let threshold = sp_consensus_sassafras::compute_threshold( + sp_consensus_sassafras::TICKET_REDUNDANCY_FACTOR, + epoch_duration as u32, + sp_consensus_sassafras::TICKET_MAX_ATTEMPTS, + next_auth.len() as u32, + ); + + // TODO-SASS-P2: if we move this in the function above we can drop only + // the invalid tickets. + if !tickets + .iter() + .all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold)) + { + return InvalidTransaction::Custom(0).into() + } + ValidTransaction::with_tag_prefix("Sassafras") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) @@ -665,11 +689,11 @@ impl Pallet { /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has /// is called within the first half of the epoch. That is, tickets received within the /// second half are dropped. - // TODO-SASS-P2: - // 1. we have to add the epoch and slot index to the call parameters. - // 2. maybe we have to drop tickets SUBMITTED after the first half. 
- pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + /// TODO-SASS-P3: we have to add the zk validity proofs + pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); + tickets.sort_unstable(); + let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; SubmitTransaction::>::submit_unsigned_transaction(call.into()).is_ok() } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 5ecae01fe4da7..df043ecd71956 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -28,6 +28,7 @@ use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; +use sp_core::{crypto, U256}; #[cfg(feature = "std")] use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; @@ -35,11 +36,12 @@ use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + Randomness, VRFInOut, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, + VRF_PROOF_LENGTH, }; /// Key type for Sassafras module. -pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; +pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; pub mod digests; pub mod inherents; @@ -52,12 +54,6 @@ mod app { /// The index of an authority. pub type AuthorityIndex = u32; -/// The prefix used by Sassafras for its ticket VRF keys. -pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf"; - -/// The prefix used by Sassafras for its post-block VRF keys. 
-pub const SASSAFRAS_BLOCK_VRF_PREFIX: &[u8] = b"substrate-sassafras-block-vrf"; - /// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Sassafras module. If that ever changes, then this must, too. #[cfg(feature = "std")] @@ -131,12 +127,22 @@ pub struct TicketInfo { pub proof: VRFProof, } +const TYPE_LABEL: &str = "type"; +const EPOCH_LABEL: &str = "epoch"; +const SLOT_LABEL: &str = "slot"; +const ATTEMPT_LABEL: &str = "slot"; +const RANDOMNESS_LABEL: &str = "randomness"; + +const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; +const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; + /// Make slot VRF transcript. pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_u64(b"slot number", *slot); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", &randomness[..]); + transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); transcript } @@ -150,37 +156,38 @@ pub fn make_slot_transcript_data( VRFTranscriptData { label: &SASSAFRAS_ENGINE_ID, items: vec![ - ("slot number", VRFTranscriptValue::U64(*slot)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), + (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), + (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), ], } } /// Make ticket VRF transcript. 
-pub fn make_ticket_transcript(randomness: &[u8], attempt: u64, epoch: u64) -> Transcript { +pub fn make_ticket_transcript(randomness: &Randomness, attempt: u64, epoch: u64) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(b"type", b"ticket"); - transcript.append_u64(b"attempt", attempt); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", randomness); + transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); transcript } /// Make ticket VRF transcript data container. #[cfg(feature = "std")] pub fn make_ticket_transcript_data( - randomness: &[u8], + randomness: &Randomness, attempt: u64, epoch: u64, ) -> VRFTranscriptData { VRFTranscriptData { label: &SASSAFRAS_ENGINE_ID, items: vec![ - ("type", VRFTranscriptValue::Bytes(b"ticket".to_vec())), - ("attempt", VRFTranscriptValue::U64(attempt)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), + (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), + (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), ], } } @@ -199,3 +206,33 @@ sp_api::decl_runtime_apis! { fn slot_ticket(slot: Slot) -> Option; } } + +/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: +/// - x: redundancy factor; +/// - s: number of slots in epoch; +/// - a: max number of attempts; +/// - v: number of validator in epoch. +/// The parameters should be chosen such that T <= 1. +/// If `attempts * validators` is zero then we fallback to T = 0 +// TODO-SASS-P3: this formula must be double-checked... 
+#[inline] +pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> U256 { + let den = attempts as u64 * validators as u64; + let num = redundancy as u64 * slots as u64; + U256::max_value() + .checked_div(den.into()) + .unwrap_or(U256::zero()) + .saturating_mul(num.into()) +} + +/// Returns true if the given VRF output is lower than the given threshold, false otherwise. +#[inline] +pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { + U256::from(ticket.as_bytes()) < threshold +} + +/// TODO-SASS-P3: add to session config +pub const TICKET_MAX_ATTEMPTS: u32 = 30; + +/// TODO-SASS-P3: add to session config +pub const TICKET_REDUNDANCY_FACTOR: u32 = 1; From 775eca5f532521c5cc99d4994d7bcb4f5d97aa21 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 10 Aug 2022 10:14:44 +0200 Subject: [PATCH 08/14] Improve pallet tickets management --- frame/sassafras/src/lib.rs | 247 ++++++++++++++-------- frame/sassafras/src/mock.rs | 4 +- frame/sassafras/src/tests.rs | 145 ++++++++----- primitives/consensus/sassafras/src/lib.rs | 8 +- 4 files changed, 249 insertions(+), 155 deletions(-) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 290274153a720..245b9fb783813 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -43,14 +43,14 @@ //! To anonymously publish the ticket to the chain a validator sends their tickets //! to a random validator who later puts it on-chain as a transaction. 
-// TODO-SASS-P2
-//#![deny(warnings)]
-//#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)]
+#![deny(warnings)]
+#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use scale_codec::{Decode, Encode};
+use scale_codec::{Decode, Encode, MaxEncodedLen};
+use scale_info::TypeInfo;
 
-use frame_support::{traits::Get, weights::Weight, BoundedBTreeSet, BoundedVec, WeakBoundedVec};
+use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec};
 use frame_system::offchain::{SendTransactionTypes, SubmitTransaction};
 use sp_application_crypto::ByteArray;
 use sp_consensus_vrf::schnorrkel;
@@ -76,6 +76,18 @@ mod tests;
 
 pub use pallet::*;
 
+/// Tickets related metadata that are commonly used together.
+#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)]
+pub struct TicketsMetadata {
+	/// Number of tickets available for even and odd session indices respectively.
+	/// I.e. the index is computed as session-index modulo 2.
+	pub tickets_count: [u32; 2],
+	/// Number of ticket segments.
+	pub segments_count: u32,
+	/// Whether the last segment has already been sorted.
+	pub sort_started: bool,
+}
+
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
@@ -196,16 +208,26 @@ pub mod pallet {
 	#[pallet::storage]
	pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>;
 
-	/// Current session tickets.
+	/// Stored tickets metadata.
 	#[pallet::storage]
-	pub type Tickets = StorageValue<_, BoundedVec, ValueQuery>;
+	pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>;
+
+	/// Tickets to be used for current and next session.
+	/// The key consists of a
+	/// - `u8` equal to session-index mod 2
+	/// - `u32` equal to the slot-index.
+	#[pallet::storage]
+	pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>;
+
+	// /// Next session tickets temporary accumulator length.
+ // #[pallet::storage] + // pub type NextTicketsSegmentsCount = StorageValue<_, u32, ValueQuery>; - /// Next session tickets. - // TODO-SASS-P2: probably the best thing is to store the tickets in a map - // Each map entry contains a vector of tickets as they are received. + /// Next session tickets temporary accumulator. + /// Special u32::MAX key is reserved for partially sorted segment. #[pallet::storage] - pub type NextTickets = - StorageValue<_, BoundedBTreeSet, ValueQuery>; + pub type NextTicketsSegments = + StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; /// Genesis configuration for Sassafras protocol. #[cfg_attr(feature = "std", derive(Default))] @@ -252,24 +274,23 @@ pub mod pallet { schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() }) .and_then(|pubkey| { - let current_slot = CurrentSlot::::get(); - let transcript = sp_consensus_sassafras::make_slot_transcript( &Self::randomness(), - current_slot, + Self::current_slot(), EpochIndex::::get(), ); - let vrf_output = pre_digest.vrf_output; - // This has already been verified by the client on block import. 
debug_assert!(pubkey - .vrf_verify(transcript.clone(), &vrf_output, &pre_digest.vrf_proof) + .vrf_verify( + transcript.clone(), + &pre_digest.vrf_output, + &pre_digest.vrf_proof + ) .is_ok()); - vrf_output.0.attach_input_hash(&pubkey, transcript).ok() + Some(pre_digest.vrf_output.to_bytes()) }) - .map(|inout| inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX)) .expect("Pre-digest contains valid randomness; qed"); Self::deposit_randomness(&randomness); @@ -286,32 +307,14 @@ pub mod pallet { ) -> DispatchResult { ensure_none(origin)?; - log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); + let mut metadata = TicketsMeta::::get(); - let mut next_tickets = NextTickets::::get(); - - // TODO-SASS-P2: temporary code - next_tickets = next_tickets.try_mutate(|tree| { - for ticket in tickets.iter() { - tree.insert(*ticket); - } - let max_tickets = T::MaxTickets::get() as usize; - if tree.len() > max_tickets { - // Remove the mid values - // TODO-SASS-P2: don't judge me, this will be reimplemented :-) - let diff = tree.len() - max_tickets; - let off = max_tickets / 2; - let val = tree.iter().nth(off).cloned().unwrap(); - let mut mid = tree.split_off(&val); - let val = mid.iter().nth(diff).cloned().unwrap(); - let mut tail = mid.split_off(&val); - tree.append(&mut tail); - log::warn!(target: "sassafras", "🌳 TICKETS OVERFLOW, drop {} tickets... (len = {})", diff, tree.len()); - } - }).expect("Tickets list len is within the allowed bounds; qed."); - - NextTickets::::put(next_tickets); + log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); + // We just require a unique key to save the partial tickets list. 
+		metadata.segments_count += 1;
+		NextTicketsSegments::::insert(metadata.segments_count, tickets);
+		TicketsMeta::::set(metadata);
 			Ok(())
 		}
 	}
@@ -369,8 +372,10 @@
 				next_auth.len() as u32,
 			);
 
-			// TODO-SASS-P2: if we move this in the function above we can drop only
-			// the invalid tickets.
+			// TODO-SASS-P2: if we move this in the `submit_tickets` call then we
+			// can drop only the invalid tickets.
+			// In this way we don't penalize validators that submit tickets together
+			// with faulty validators.
 			if !tickets
 				.iter()
 				.all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold))
@@ -384,8 +389,8 @@
 				// TODO-SASS-P2: if possible use a more efficient way to distinquish
 				// duplicates...
 				.and_provides(tickets)
-				// TODO-SASS-P2: this should be set such that it is discarded after the first
-				// half
+				// TODO-SASS-P2: this should be set such that it is discarded after the
+				// first half
 				.longevity(3_u64)
 				.propagate(true)
 				.build()
@@ -429,9 +434,10 @@
 	}
 
 	fn slot_epoch_index(slot: Slot) -> u64 {
-		if *GenesisSlot::::get() == 0 {
-			return 0
-		}
+		// TODO-SASS-P2 : is this required?
+		// if *GenesisSlot::::get() == 0 {
+		// 	return 0
+		// }
 		*slot.saturating_sub(Self::current_epoch_start())
 	}
 
@@ -445,7 +451,7 @@
 	/// If we detect one or more skipped epochs the policy is to use the authorities and values
 	/// from the first skipped epoch.
 	/// Should the tickets be invalidated? Currently they are... see the `get-ticket` method.
- pub fn enact_epoch_change( + pub(crate) fn enact_epoch_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< (AuthorityId, SassafrasAuthorityWeight), @@ -461,22 +467,21 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update epoch index - let mut epoch_index = EpochIndex::::get() + let mut epoch_idx = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - // TODO-SASS-P2: Test this, we also have to properly set the epoch index - let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_index)); + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); if slot_idx >= T::EpochDuration::get() { // Detected one or more skipped epochs, kill tickets and recompute the `epoch_index`. - NextTickets::::kill(); + TicketsMeta::::kill(); // TODO-SASS-P2: adjust epoch index (TEST ME) let idx: u64 = slot_idx.into(); - epoch_index += idx / T::EpochDuration::get(); + epoch_idx += idx / T::EpochDuration::get(); } - EpochIndex::::put(epoch_index); + EpochIndex::::put(epoch_idx); - let next_epoch_index = epoch_index + let next_epoch_index = epoch_idx .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); @@ -502,23 +507,16 @@ impl Pallet { // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); // } - Self::update_tickets(); - } - - /// Call this fuction on epoch change to update tickets. - /// Enact next epoch tickets. 
- fn update_tickets() { - let mut tickets = NextTickets::::take().into_iter().collect::>(); - log::debug!(target: "sassafras", "🌳 @@@@@@@@@ Enacting {} tickets", tickets.len()); - - if tickets.len() > T::MaxTickets::get() as usize { - log::error!(target: "sassafras", "🌳 should never happen..."); - tickets.truncate(T::MaxTickets::get() as usize); + let epoch_key = (epoch_idx & 1) as u8; + let mut tickets_metadata = TicketsMeta::::get(); + // Optionally finish sorting + if tickets_metadata.segments_count != 0 { + Self::sort_tickets(u32::MAX, epoch_key, &mut tickets_metadata); } - - let tickets = BoundedVec::::try_from(tickets) - .expect("vector has been eventually truncated; qed"); - Tickets::::put(tickets); + // Clear the prev (equal to the next) epoch tickets counter. + let next_epoch_key = epoch_key ^ 1; + tickets_metadata.tickets_count[next_epoch_key as usize] = 0; + TicketsMeta::::set(tickets_metadata); } /// Call this function on epoch change to update the randomness. @@ -625,6 +623,8 @@ impl Pallet { Initialized::::put(pre_digest); + // TODO-SASS-P2: incremental parial ordering for NextTickets + // Enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now); } @@ -651,11 +651,11 @@ impl Pallet { /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), /// or if the slot falls beyond the next epoch. - // TODO-SASS-P2: This is a very inefficient and temporary solution. - // On refactory we will come up with a better solution (like a scattered vector). 
pub fn slot_ticket(slot: Slot) -> Option { + let epoch_idx = EpochIndex::::get(); let duration = T::EpochDuration::get(); - let slot_idx = Self::slot_epoch_index(slot); // % duration; + let mut slot_idx = Self::slot_epoch_index(slot); + let mut tickets_meta = TicketsMeta::::get(); let get_ticket_idx = |slot_idx| { let ticket_idx = if slot_idx < duration / 2 { @@ -663,28 +663,89 @@ impl Pallet { } else { 2 * (duration - (slot_idx + 1)) }; - log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); - ticket_idx as usize + log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); + ticket_idx as u32 }; - if slot_idx < duration { - // Get a ticket for the current epoch. - let tickets = Tickets::::get(); - let ticket_idx = get_ticket_idx(slot_idx); - tickets.get(ticket_idx).cloned() - } else if slot_idx < 2 * duration { - // Get a ticket for the next epoch. Since its state values were not enacted yet, we - // have to fetch it from the `NextTickets` list. This may happen when an author request - // the first ticket for an epoch. - let tickets = NextTickets::::get(); - let ticket_idx = get_ticket_idx(slot_idx - duration); - tickets.iter().nth(ticket_idx).cloned() + let mut epoch_key = (epoch_idx & 1) as u8; + + if duration <= slot_idx && slot_idx < 2 * duration { + // Try to get a ticket for the next epoch. Since its state values were not enacted yet, + // we may have to finish sorting the tickets. 
+ epoch_key ^= 1; + slot_idx -= duration; + if tickets_meta.segments_count != 0 { + Self::sort_tickets(tickets_meta.segments_count, epoch_key, &mut tickets_meta); + TicketsMeta::::set(tickets_meta.clone()); + } + } else if slot_idx >= 2 * duration { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < tickets_meta.tickets_count[epoch_key as usize] { + Tickets::::get((epoch_key, ticket_idx)) } else { - // We have no tickets for the requested slot yet. None } } + // Sort the tickets that belong to at most `max_iter` segments starting from the last. + // If the `max_iter` value is equal to the number of segments then the result is truncated + // and saved as the tickets associated to `epoch_key`. + // Else the result is saved within the structure itself to be used on next iterations. + fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { + let mut segments_count = metadata.segments_count; + let max_iter = max_iter.min(segments_count); + let max_tickets = T::MaxTickets::get() as usize; + + let mut new_segment = if metadata.sort_started { + NextTicketsSegments::::take(u32::MAX).into_inner() + } else { + Vec::new() + }; + + let mut require_sort = max_iter != 0; + + let mut sup = if new_segment.len() >= max_tickets { + new_segment[new_segment.len() - 1] + } else { + Ticket::try_from([0xFF; 32]).expect("This is a valid ticket value; qed") + }; + + for _ in 0..max_iter { + let segment = NextTicketsSegments::::take(segments_count); + + segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); + if new_segment.len() > max_tickets { + require_sort = false; + new_segment.sort_unstable(); + new_segment.truncate(max_tickets); + sup = new_segment[new_segment.len() - 1]; + } + + segments_count -= 1; + } + + if require_sort { + new_segment.sort_unstable(); + } + + if segments_count == 0 { + // Sort is over, write to the map. + // TODO-SASS-P2: is there a better way to write a map from a vector? 
+ new_segment.iter().enumerate().for_each(|(i, t)| { + Tickets::::insert((epoch_key, i as u32), t); + }); + metadata.tickets_count[epoch_key as usize] = new_segment.len() as u32; + } else { + NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); + metadata.sort_started = true; + } + + metadata.segments_count = segments_count; + } + /// Submit next epoch validator tickets via an unsigned extrinsic. /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has /// is called within the first half of the epoch. That is, tickets received within the diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 13be7cf1d5034..120120b84e7eb 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -143,7 +143,7 @@ pub fn new_test_ext_with_pairs( (pairs, t.into()) } -fn make_ticket_vrf(slot: Slot, attempt: u64, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { +fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); let mut epoch = Sassafras::epoch_index(); @@ -164,7 +164,7 @@ fn make_ticket_vrf(slot: Slot, attempt: u64, pair: &AuthorityPair) -> (VRFOutput (output, proof) } -pub fn make_tickets(slot: Slot, attempts: u64, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { +pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { (0..attempts) .into_iter() .map(|attempt| make_ticket_vrf(slot, attempt, pair)) diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 27dcdf66917cc..4aa79a2ef23f7 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -41,31 +41,57 @@ use std::collections::BTreeSet; #[test] fn slot_ticket_fetch() { let max_tickets: u32 = ::MaxTickets::get(); + assert_eq!(max_tickets, 6); - let tickets: Vec = (0..max_tickets as u8) + let curr_tickets: Vec = (0..max_tickets as u8) 
.into_iter() .map(|i| [i; 32].try_into().unwrap()) .collect(); - let tickets = - BoundedVec::<_, _>::try_from(tickets).expect("vector has been eventually truncated; qed"); - new_test_ext(4).execute_with(|| { - Tickets::::put(tickets.clone()); + let next_tickets: Vec = (0..(max_tickets - 1) as u8) + .into_iter() + .map(|i| [max_tickets as u8 + i; 32].try_into().unwrap()) + .collect(); - assert_eq!(Sassafras::slot_ticket(0.into()), Some(tickets[1])); - assert_eq!(Sassafras::slot_ticket(1.into()), Some(tickets[3])); - assert_eq!(Sassafras::slot_ticket(2.into()), Some(tickets[5])); + new_test_ext(4).execute_with(|| { + curr_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((0, i as u32), ticket); + }); + next_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((1, i as u32), ticket); + }); + TicketsMeta::::set(TicketsMetadata { + tickets_count: [max_tickets, max_tickets - 1], + segments_count: 0, + sort_started: false, + }); + + // Test next session tickets fetch + assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(1.into()), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket(2.into()), Some(curr_tickets[5])); assert_eq!(Sassafras::slot_ticket(3.into()), None); assert_eq!(Sassafras::slot_ticket(4.into()), None); assert_eq!(Sassafras::slot_ticket(5.into()), None); assert_eq!(Sassafras::slot_ticket(6.into()), None); - assert_eq!(Sassafras::slot_ticket(7.into()), Some(tickets[4])); - assert_eq!(Sassafras::slot_ticket(8.into()), Some(tickets[2])); - assert_eq!(Sassafras::slot_ticket(9.into()), Some(tickets[0])); - - // TODO-SASS-P2: test next epoch tickets fetch - assert_eq!(Sassafras::slot_ticket(10.into()), None); - + assert_eq!(Sassafras::slot_ticket(7.into()), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket(8.into()), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket(9.into()), Some(curr_tickets[0])); + + // Test next session tickets fetch + 
assert_eq!(Sassafras::slot_ticket(10.into()), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket(11.into()), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket(12.into()), None); //Some(next_tickets[5])); + assert_eq!(Sassafras::slot_ticket(13.into()), None); + assert_eq!(Sassafras::slot_ticket(14.into()), None); + assert_eq!(Sassafras::slot_ticket(15.into()), None); + assert_eq!(Sassafras::slot_ticket(16.into()), None); + assert_eq!(Sassafras::slot_ticket(17.into()), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket(18.into()), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket(19.into()), Some(next_tickets[0])); + + // Test fetch beyend next session + assert_eq!(Sassafras::slot_ticket(20.into()), None); assert_eq!(Sassafras::slot_ticket(42.into()), None); }); } @@ -79,7 +105,7 @@ fn genesis_values() { } #[test] -fn on_first_after_genesis_block() { +fn on_first_block_after_genesis() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { @@ -117,7 +143,7 @@ fn on_first_after_genesis_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("98dc63bd10704f60016011be269a02ec780e9b870222d12457ea7e8a05065028"), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), ); // Header data check @@ -166,7 +192,7 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("98dc63bd10704f60016011be269a02ec780e9b870222d12457ea7e8a05065028"), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), ); Sassafras::on_finalize(2); @@ -184,7 +210,7 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("180f852e5a4f4370071072402c395758efdb2a417e99deaed34acc269125ac3e"), + hex!("ea16f22af4afe5bfb8e3be3e257c3a88ae0c2406a4afc067871b6e5a7ae8756e"), ); // Header data check @@ -221,11 +247,11 @@ fn 
epoch_change_block() { assert_eq!(Sassafras::randomness(), [0; 32],); assert_eq!( NextRandomness::::get(), - hex!("dae0db238bd08ec36537d924cade5e5ad668e83f4e9a200a1e6aa1102919c999"), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), ); assert_eq!( RandomnessAccumulator::::get(), - hex!("4cfa0840c842f6095155b35bad7f0bf8113c11a12a8ab3e3d116d91b0e8f31f9"), + hex!("ec9f2acd75e3a901b3a3fad95267a275af1aded3df8bebebb8d14ebd2190ce59"), ); Sassafras::on_finalize(start_block + epoch_duration); @@ -242,11 +268,11 @@ fn epoch_change_block() { assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), - hex!("dae0db238bd08ec36537d924cade5e5ad668e83f4e9a200a1e6aa1102919c999"), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), ); assert_eq!( RandomnessAccumulator::::get(), - hex!("98ed5e9a57afafaea3fddd98555a616f0fefdde27e316ca42cd29de323f90d2a"), + hex!("d017578d6bad1856315866ce1ef845c2584873fcbc011db7dcb99f1f19baa6f3"), ); // Header data check @@ -272,6 +298,7 @@ fn submit_enact_claim_tickets() { ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); System::initialize(&start_block, &Default::default(), &digest); @@ -283,40 +310,40 @@ fn submit_enact_claim_tickets() { let _digest = progress_to_block(2, &pairs[0]).unwrap(); // Check state before tickets submission - assert!(Tickets::::get().is_empty()); - assert!(NextTickets::::get().is_empty()); + assert!(Tickets::::iter().next().is_none()); - // Submit authoring tickets. - let mut tickets: Vec = make_tickets(start_slot + 1, 30, &pairs[0]) + // Submit authoring tickets in three different batches. + // We can ignore the threshold since we are not passing through the unsigned extrinsic + // validation. 
+ let mut tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]) .into_iter() .map(|(output, _)| output) .collect(); + let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets0).unwrap(); + let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets1).unwrap(); + let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets2).unwrap(); - Sassafras::submit_tickets(Origin::none(), tickets.clone()).unwrap(); - - let max_tickets: u32 = ::MaxTickets::get(); tickets.sort(); - let front = tickets.iter().take(max_tickets as usize / 2); - let back = tickets.iter().rev().take(max_tickets as usize / 2); - let mut expected_tickets = front.chain(back).map(|t| *t).collect::>(); - expected_tickets.sort(); + tickets.truncate(max_tickets as usize); + let expected_tickets = tickets; - // Check state - assert!(Tickets::::get().is_empty()); - let next_tickets = NextTickets::::get().into_iter().collect::>(); - assert_eq!(expected_tickets, next_tickets); + // Check state after submit + let meta = TicketsMeta::::get(); + assert!(Tickets::::iter().next().is_none()); + assert_eq!(meta.segments_count, 3); + assert_eq!(meta.tickets_count, [0, 0]); // Process up to the last epoch slot (do not enact epoch change) let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); - assert!(Tickets::::get().is_empty()); - let next_tickets = NextTickets::::get().into_iter().collect::>(); - assert_eq!(expected_tickets, next_tickets); + + // TODO-SASS-P2: at this point next tickets should have been sorted + //assert_eq!(NextTicketsSegmentsCount::::get(), 0); + //assert!(Tickets::::iter().next().is_some()); // Check if we can claim next epoch tickets in outside-in fashion. 
- // - // This is to allow native code to eventually fetch the first ticket for a new epoch, - // before the epoch data is effectivelly enacted by the runtime - // (block authors tries to claim a ticket before block construction). let slot = Sassafras::current_slot(); assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); @@ -328,12 +355,13 @@ fn submit_enact_claim_tickets() { assert_eq!(Sassafras::slot_ticket(slot + 10).unwrap(), expected_tickets[0]); assert!(Sassafras::slot_ticket(slot + 11).is_none()); - // Enact epoch tickets by progressing one more block + // Enact session change by progressing one more block let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); - let curr_tickets = Tickets::::get().into_iter().collect::>(); - assert_eq!(expected_tickets, curr_tickets); - assert!(NextTickets::::get().is_empty()); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); let slot = Sassafras::current_slot(); assert_eq!(Sassafras::slot_ticket(slot).unwrap(), expected_tickets[1]); @@ -355,23 +383,27 @@ fn block_skips_epochs() { ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; + let epoch_duration: u64 = ::EpochDuration::get(); let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); System::initialize(&start_block, &Default::default(), &digest); Sassafras::on_initialize(start_block); - let tickets: Vec = make_tickets(start_slot + 1, 30, &pairs[0]) + let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]) .into_iter() .map(|(output, _)| output) .collect(); - Sassafras::submit_tickets(Origin::none(), tickets.clone()).unwrap(); + Sassafras::submit_tickets(Origin::none(), BoundedVec::truncate_from(tickets.clone())) + .unwrap(); + + // Force enact of next tickets + assert_eq!(TicketsMeta::::get().segments_count, 1); + Sassafras::slot_ticket(start_slot + 
epoch_duration).unwrap(); + assert_eq!(TicketsMeta::::get().segments_count, 0); - assert!(Tickets::::get().is_empty()); - assert!(!NextTickets::::get().is_empty()); let next_random = NextRandomness::::get(); // We want to trigger an skip epoch in this test. - let epoch_duration: u64 = ::EpochDuration::get(); let offset = 3 * epoch_duration; let _digest = go_to_block(start_block + offset, start_slot + offset, &pairs[0]); @@ -383,9 +415,10 @@ fn block_skips_epochs() { assert_eq!(Sassafras::epoch_index(), 3); assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); assert_eq!(Sassafras::current_slot_epoch_index(), 0); + // Tickets were discarded - assert!(Tickets::::get().is_empty()); - assert!(NextTickets::::get().is_empty()); + let meta = TicketsMeta::::get(); + assert_eq!(meta, TicketsMetadata::default()); // We've used the last known next epoch randomness as a fallback assert_eq!(next_random, Sassafras::randomness()); }); diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index df043ecd71956..11206ed17d80d 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -165,10 +165,10 @@ pub fn make_slot_transcript_data( } /// Make ticket VRF transcript. 
-pub fn make_ticket_transcript(randomness: &Randomness, attempt: u64, epoch: u64) -> Transcript { +pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); - transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt); + transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); transcript @@ -178,14 +178,14 @@ pub fn make_ticket_transcript(randomness: &Randomness, attempt: u64, epoch: u64) #[cfg(feature = "std")] pub fn make_ticket_transcript_data( randomness: &Randomness, - attempt: u64, + attempt: u32, epoch: u64, ) -> VRFTranscriptData { VRFTranscriptData { label: &SASSAFRAS_ENGINE_ID, items: vec![ (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), - (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt)), + (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), ], From e7208e7679c8557be8fc9c2bcf76d779f893aed7 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 17 Aug 2022 19:31:18 +0200 Subject: [PATCH 09/14] Code refactory. 
Logic divided over multiple files --- bin/node-sassafras/node/src/chain_spec.rs | 12 +- bin/node-sassafras/node/src/service.rs | 6 +- bin/node-sassafras/runtime/src/lib.rs | 12 +- client/consensus/sassafras/src/authorship.rs | 443 +++++- client/consensus/sassafras/src/aux_schema.rs | 2 +- .../consensus/sassafras/src/block_import.rs | 371 +++++ client/consensus/sassafras/src/lib.rs | 1279 ++--------------- .../consensus/sassafras/src/verification.rs | 318 +++- frame/sassafras/src/lib.rs | 64 +- frame/sassafras/src/tests.rs | 1 + primitives/consensus/sassafras/src/digests.rs | 8 +- primitives/consensus/sassafras/src/lib.rs | 38 +- 12 files changed, 1286 insertions(+), 1268 deletions(-) create mode 100644 client/consensus/sassafras/src/block_import.rs diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 76c578d5a2a1d..17fefdee11fb1 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -3,13 +3,14 @@ use node_sassafras_runtime::{ SudoConfig, SystemConfig, WASM_BINARY, }; use sc_service::ChainType; -use sp_consensus_sassafras::AuthorityId as SassafrasId; +use sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; -// The URL for the telemetry server. -// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +// Genesis constants for Sassafras parameters configuration. +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 32; +const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. 
pub type ChainSpec = sc_service::GenericChainSpec; @@ -143,7 +144,10 @@ fn testnet_genesis( }, sassafras: SassafrasConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), - epoch_config: node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG, + epoch_config: SassafrasEpochConfiguration { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, }, grandpa: GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index ec8f10c1a59b1..33f66262c6dda 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -116,12 +116,12 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( - sc_consensus_sassafras::Config::get(&*client)?, + sc_consensus_sassafras::configuration(&*client)?, grandpa_block_import, client.clone(), )?; - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let import_queue = sc_consensus_sassafras::import_queue( sassafras_link.clone(), @@ -269,7 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let sassafras_config = sc_consensus_sassafras::SassafrasParams { client: client.clone(), diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 307c82115cfae..69e66c1c198d9 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -126,12 +126,6 @@ pub const DAYS: BlockNumber = HOURS * 24; pub const 
MAX_AUTHORITIES: u32 = 32; -/// The Sassafras epoch configuration at genesis. -pub const SASSAFRAS_GENESIS_EPOCH_CONFIG: sp_consensus_sassafras::SassafrasEpochConfiguration = - sp_consensus_sassafras::SassafrasEpochConfiguration { - // TODO-SASS-P2 - }; - /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -405,11 +399,11 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasGenesisConfiguration { - sp_consensus_sassafras::SassafrasGenesisConfiguration { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + sp_consensus_sassafras::SassafrasConfiguration { slot_duration: Sassafras::slot_duration(), epoch_length: EpochDuration::get(), - genesis_authorities: Sassafras::authorities().to_vec(), + authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), } } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 1542977a0a60d..7d7059022ec6b 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,19 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Sassafras authority selection and slot claiming. +//! Types and functions related to authority selection and slot claiming. -use crate::Epoch; +use super::*; -use scale_codec::Encode; -use sp_application_crypto::AppKey; use sp_consensus_sassafras::{ digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, Ticket, TicketInfo, }; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::{twox_64, ByteArray}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// Get secondary authority index for the given epoch and slot. 
#[inline] @@ -85,13 +81,10 @@ pub fn claim_slot( /// Generate the tickets for the given epoch. /// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` /// structure. The additional information will be used during epoch to claim slots. -pub fn generate_epoch_tickets( - epoch: &mut Epoch, - max_attempts: u32, - redundancy_factor: u32, - keystore: &SyncCryptoStorePtr, -) -> Vec { +pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { let mut tickets = vec![]; + let max_attempts = epoch.config.attempts_number; + let redundancy_factor = epoch.config.redundancy_factor; let threshold = sp_consensus_sassafras::compute_threshold( redundancy_factor, @@ -111,7 +104,7 @@ pub fn generate_epoch_tickets( let make_ticket = |attempt| { let transcript_data = - make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); + make_ticket_transcript_data(&epoch.randomness, attempt, epoch.epoch_index); // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. 
@@ -146,3 +139,427 @@ pub fn generate_epoch_tickets( } tickets } + +struct SassafrasSlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + genesis_config: SassafrasConfiguration, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SassafrasSlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "sassafras" + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + fn epoch_data( + &self, + parent: &B::Header, + slot: Slot, + ) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + async fn claim_slot( + &self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); + + // Get the next slot ticket from the runtime. + let block_id = BlockId::Hash(parent_header.hash()); + let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; + + // TODO-SASS-P2 + debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); + + let claim = authorship::claim_slot( + slot, + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))? + .as_ref(), + ticket, + &self.keystore, + ); + if claim.is_some() { + debug!(target: "sassafras", "🌳 Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); + true + } else { + false + }, + } + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![::sassafras_pre_digest(claim.0.clone())] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges<>::Transaction, B>, + (_, public): Self::Claim, + epoch_descriptor: Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>::Transaction>, + sp_consensus::Error, + > { + // 
Sign the pre-sealed hash of the block and then add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*self.keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? + .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = ::sassafras_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok(import_block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO-SASS-P2 + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO-SASS-P2 + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' + let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &block_proposal_slot_portion, + None, + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) + } +} + +async fn tickets_worker( + client: Arc, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + select_chain: SC, +) where + B: BlockT, + C: BlockchainEvents + ProvideRuntimeApi, + C::Api: SassafrasApi, + SC: SelectChain + 'static, +{ + let mut notifications = client.import_notification_stream(); + while let Some(notification) = notifications.next().await { + let epoch_desc = match find_next_epoch_digest::(¬ification.header) { + Ok(Some(epoch_desc)) => epoch_desc, + Err(err) => { + warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); + continue + }, + _ => continue, + }; + + debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); + + let number = *notification.header.number(); + let position = if number == One::one() { + EpochIdentifierPosition::Genesis1 + } else { + EpochIdentifierPosition::Regular + }; + let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; + + let tickets = epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| authorship::generate_epoch_tickets(epoch, &keystore)) + .unwrap_or_default(); + + if tickets.is_empty() { + continue + } + + // Get the best block on which we will build and send the tickets. 
+ let best_id = match select_chain.best_chain().await { + Ok(header) => BlockId::Hash(header.hash()), + Err(err) => { + error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); + continue + }, + }; + + let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { + Err(err) => Some(err.to_string()), + Ok(false) => Some("Unknown reason".to_string()), + _ => None, + }; + if let Some(err) = err { + error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); + // Remove tickets from epoch tree node. + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| epoch.tickets_info.clear()); + } + } +} + +/// Worker for Sassafras which implements `Future`. This must be polled. +pub struct SassafrasWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: SlotNotificationSinks, +} + +impl SassafrasWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self, + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl Future for SassafrasWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.inner.as_mut().poll(cx) + } +} + +/// Slot notification sinks. +type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>>, +>; + +/// Parameters for Sassafras. +pub struct SassafrasParams { + /// The client to use + pub client: Arc, + /// The keystore that manages the keys of the node. + pub keystore: SyncCryptoStorePtr, + /// The chain selection strategy + pub select_chain: SC, + /// The environment we are producing blocks for. + pub env: EN, + /// The underlying block-import object to supply our produced blocks to. 
+ /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + /// The source of timestamps for relative slots + pub sassafras_link: SassafrasLink, + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, +} + +/// Start the Sassafras worker. +pub fn start_sassafras( + SassafrasParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + can_author_with, + }: SassafrasParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + PreCommitActions + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer>, + I: BlockImport> + + Send + + Sync + + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send + Sync + 'static, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let slot_worker = SassafrasSlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, 
+ force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + genesis_config: sassafras_link.genesis_config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + sassafras_link.genesis_config.slot_duration(), + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), + sync_oracle, + create_inherent_data_providers, + can_author_with, + ); + + let tickets_worker = tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + ); + + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 59f53415a31d2..07f723341b069 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Schema for Sassafras epoch changes in the auxiliary db. +//! Schema for auxiliary data persistence. use scale_codec::{Decode, Encode}; diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs new file mode 100644 index 0000000000000..dddeb2155c0a3 --- /dev/null +++ b/client/consensus/sassafras/src/block_import.rs @@ -0,0 +1,371 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to block import. + +use super::*; + +/// A block-import handler for Sassafras. +/// +/// This scans each imported block for epoch change announcements. The announcements are +/// tracked in a tree (of all forks), and the import logic validates all epoch change +/// transitions, i.e. whether a given epoch change is expected or whether it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. +pub struct SassafrasBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: SassafrasConfiguration, +} + +impl Clone for SassafrasBlockImport { + fn clone(&self) -> Self { + SassafrasBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + genesis_config: self.genesis_config.clone(), + } + } +} + +impl SassafrasBlockImport { + /// Constructor. 
+ pub fn new( + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: SassafrasConfiguration, + ) -> Self { + SassafrasBlockImport { inner, client, epoch_changes, genesis_config } + } +} + +#[async_trait::async_trait] +impl BlockImport for SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + + let pre_digest = find_pre_digest::(&block.header).expect( + "valid sassafras headers must contain a predigest; header has been already verified; qed", + ); + let slot = pre_digest.slot; + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( + "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ + header has already been verified; qed", + ); + + // Make sure that slot number is strictly increasing + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) + } + + // If there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + // Use an extra scope to make the compiler happy, because otherwise he complains about the + // mutex, even if we dropped it... 
+ let mut epoch_changes = { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + + let added_weight = pre_digest.ticket_info.is_some() as u32; + let total_weight = parent_weight + added_weight; + + // Search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + let info = self.client.info(); + + if let Some(mut next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + }) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "sassafras", + log_level, + "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + if next_epoch_descriptor.config.is_none() { + next_epoch_descriptor.config = Some(viable_epoch.as_ref().config.clone()); + } + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "sassafras", + log_level, + "🌳 🍁 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| { + ConsensusError::ClientImport(format!( + "Error importing epoch changes: {}", + e + )) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + *epoch_changes = + old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + "No block weight for parent header.".to_string(), + ) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; + // Release the mutex, but it stays locked + epoch_changes.release_mutex() + }; + + let import_result = self.inner.import_block(block, new_cache).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + if import_result.is_err() { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes.upgrade() = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + if info.block_gap.is_none() { + epoch_changes.clear_gap(); + } + + let finalized_slot = { + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect( + "best finalized hash was given by client; finalized headers must exist in db; qed", + ); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .slot + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(()) +} + +/// Produce a Sassafras block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. 
+pub fn block_import( + genesis_config: SassafrasConfiguration, + inner_block_import: I, + client: Arc, +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> +where + C: AuxStore + HeaderBackend + HeaderMetadata + 'static, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; + + let link = SassafrasLink { + epoch_changes: epoch_changes.clone(), + genesis_config: genesis_config.clone(), + }; + + let block_import = + SassafrasBlockImport::new(inner_block_import, client, epoch_changes, genesis_config); + + Ok((block_import, link)) +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index d6abbbb2ed531..b62baa6618e9c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -52,7 +52,8 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, }, - import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue}, + Verifier, }; use sc_consensus_epochs::{ descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, @@ -70,8 +71,8 @@ use sp_consensus::{ BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; -use sp_consensus_slots::{Slot, SlotDuration}; -use sp_core::{crypto::ByteArray, ExecutionContext}; +use sp_consensus_slots::Slot; +use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ @@ -84,77 +85,22 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, - AuthorityId, 
AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, SassafrasGenesisConfiguration, Ticket, TicketInfo, VRFOutput, - VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, AuthoritySignature, + SassafrasApi, SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, + Ticket, TicketInfo, VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, + VRF_PROOF_LENGTH, }; mod authorship; mod aux_schema; +mod block_import; mod verification; -/// Sassafras epoch information -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] -pub struct Epoch { - /// The epoch index. - pub epoch_index: u64, - /// The starting slot of the epoch. - pub start_slot: Slot, - /// The duration of this epoch in slots. - pub duration: u64, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], - /// Configuration of the epoch. - pub config: SassafrasEpochConfiguration, - /// Tickets metadata. - pub tickets_info: BTreeMap, -} - -impl EpochT for Epoch { - type NextEpochDescriptor = NextEpochDescriptor; - type Slot = Slot; - - fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - // TODO-SASS-P2: allow config change on epoch change - config: self.config.clone(), - tickets_info: BTreeMap::new(), - } - } - - fn start_slot(&self) -> Slot { - self.start_slot - } - - fn end_slot(&self) -> Slot { - self.start_slot + self.duration - } -} - -impl Epoch { - /// Create the genesis epoch (epoch #0). This is defined to start at the slot of - /// the first block, so that has to be provided. 
- pub fn genesis(genesis_config: &SassafrasGenesisConfiguration, slot: Slot) -> Epoch { - Epoch { - epoch_index: 0, - start_slot: slot, - duration: genesis_config.epoch_length, - authorities: genesis_config.genesis_authorities.clone(), - randomness: genesis_config.randomness, - config: SassafrasEpochConfiguration {}, - tickets_info: BTreeMap::new(), - } - } -} +pub use authorship::{start_sassafras, SassafrasParams, SassafrasWorker}; +pub use block_import::{block_import, SassafrasBlockImport}; +pub use verification::SassafrasVerifier; -/// Errors encountered by the Sassafras authorship task. +/// Errors encountered by the Sassafras routines. /// TODO-SASS-P2: remove unused errors. #[derive(Debug, thiserror::Error)] pub enum Error { @@ -167,12 +113,6 @@ pub enum Error { /// Multiple Sassafras epoch change digests #[error("Multiple Sassafras epoch change digests")] MultipleEpochChangeDigests, - // /// Multiple Sassafras config change digests - // #[error("Multiple Sassafras config change digests, rejecting!")] - // MultipleConfigChangeDigests, - // /// Could not extract timestamp and slot - // #[error("Could not extract timestamp and slot: {0}")] - // Extraction(sp_consensus::Error), /// Could not fetch epoch #[error("Could not fetch epoch at {0:?}")] FetchEpoch(B::Hash), @@ -197,12 +137,6 @@ pub enum Error { /// Bad signature #[error("Bad signature on {0:?}")] BadSignature(B::Hash), - // /// Invalid author: Expected secondary author - // #[error("Invalid author: Expected secondary author: {0:?}, got: {1:?}.")] - // InvalidAuthor(AuthorityId, AuthorityId), - // /// VRF verification of block by author failed - // #[error("VRF verification of block by author {0:?} failed: threshold {1} exceeded")] - // VRFVerificationOfBlockFailed(AuthorityId, u128), /// VRF verification failed #[error("VRF verification failed: {0:?}")] VRFVerificationFailed(SignatureError), @@ -215,9 +149,6 @@ pub enum Error { /// Expected epoch change to happen. 
#[error("Expected epoch change to happen at {0:?}, s{1}")] ExpectedEpochChange(B::Hash, Slot), - // /// Unexpected config change. - // #[error("Unexpected config change")] - // UnexpectedConfigChange, /// Unexpected epoch change #[error("Unexpected epoch change")] UnexpectedEpochChange, @@ -250,495 +181,103 @@ impl From> for String { } } +// Convenience function fn sassafras_err(error: Error) -> Error { error!(target: "sassafras", "🌳 {}", error); error } -/// Intermediate value passed to block importer. -pub struct SassafrasIntermediate { - /// The epoch descriptor. - pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +/// Sassafras epoch information +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// The duration of this epoch in slots. + pub duration: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// Configuration parameters of the epoch. + pub config: SassafrasEpochConfiguration, + /// Tickets metadata. + pub tickets_info: BTreeMap, } -/// Intermediate key for Babe engine. -pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; - -/// Configuration for Sassafras used for defining block verification parameters as -/// well as authoring (e.g. the slot duration). -#[derive(Clone)] -pub struct Config { - genesis_config: SassafrasGenesisConfiguration, -} +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type Slot = Slot; -impl Config { - /// Read Sassafras genesis configuration from the runtime. - /// - /// TODO-SASS-P4: (FIXME) - /// This doesn't return the genesis configuration, but the Configuration at best block. - /// There is an open [PR](https://github.com/paritytech/substrate/pull/11760) for BABE, - /// we'll follow the same strategy once it is closed. 
- pub fn get(client: &C) -> ClientResult - where - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: SassafrasApi, - { - let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); - if client.usage_info().chain.finalized_state.is_none() { - debug!(target: "sassafras", "🌳 No finalized state is available. Reading config from genesis"); - best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + config: descriptor.config.expect("configuration should have been set"), + tickets_info: BTreeMap::new(), } - - let genesis_config = client.runtime_api().configuration(&best_block_id)?; - - Ok(Config { genesis_config }) } - /// Get the genesis configuration. - pub fn genesis_config(&self) -> &SassafrasGenesisConfiguration { - &self.genesis_config + fn start_slot(&self) -> Slot { + self.start_slot } - /// Get the slot duration defined in the genesis configuration. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.genesis_config.slot_duration) + fn end_slot(&self) -> Slot { + self.start_slot + self.duration } } -/// Parameters for Sassafras. -pub struct SassafrasParams { - /// The client to use - pub client: Arc, - /// The keystore that manages the keys of the node. - pub keystore: SyncCryptoStorePtr, - /// The chain selection strategy - pub select_chain: SC, - /// The environment we are producing blocks for. - pub env: EN, - /// The underlying block-import object to supply our produced blocks to. - /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise - /// critical consensus logic will be omitted. 
- pub block_import: I, - /// A sync oracle - pub sync_oracle: SO, - /// Hook into the sync module to control the justification sync process. - pub justification_sync_link: L, - /// Something that can create the inherent data providers. - pub create_inherent_data_providers: CIDP, - /// Force authoring of blocks even if we are offline - pub force_authoring: bool, - /// The source of timestamps for relative slots - pub sassafras_link: SassafrasLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis(config: &SassafrasConfiguration, slot: Slot) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot, + duration: config.epoch_length, + authorities: config.authorities.clone(), + randomness: config.randomness, + config: SassafrasEpochConfiguration::default(), + tickets_info: BTreeMap::new(), + } + } } -/// Start the Sassafras worker. -pub fn start_sassafras( - SassafrasParams { - client, - keystore, - select_chain, - env, - block_import, - sync_oracle, - justification_sync_link, - create_inherent_data_providers, - force_authoring, - sassafras_link, - can_author_with, - }: SassafrasParams, -) -> Result, sp_consensus::Error> +/// TODO-SASS-P2 +pub fn configuration(client: &C) -> ClientResult where B: BlockT, - C: ProvideRuntimeApi - + ProvideUncles - + BlockchainEvents - + PreCommitActions - + HeaderBackend - + HeaderMetadata - + Send - + Sync - + 'static, + // TODO-SASS-P2: we require all these bunds? 
+ C: AuxStore + ProvideRuntimeApi + UsageProvider, C::Api: SassafrasApi, - SC: SelectChain + 'static, - EN: Environment + Send + Sync + 'static, - EN::Proposer: Proposer>, - I: BlockImport> - + Send - + Sync - + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - L: sc_consensus::JustificationSyncLink + 'static, - CIDP: CreateInherentDataProviders + Send + Sync + 'static, - CIDP::InherentDataProviders: InherentDataProviderExt + Send, - CAW: CanAuthorWith + Send + Sync + 'static, - ER: std::error::Error + Send + From + From + 'static, { - info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); - - let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); - - let worker = SassafrasSlotWorker { - client: client.clone(), - block_import, - env, - sync_oracle: sync_oracle.clone(), - justification_sync_link, - force_authoring, - keystore: keystore.clone(), - epoch_changes: sassafras_link.epoch_changes.clone(), - slot_notification_sinks: slot_notification_sinks.clone(), - config: sassafras_link.config.clone(), + let hash = if client.usage_info().chain.finalized_state.is_some() { + client.usage_info().chain.best_hash + } else { + debug!(target: "sassafras", "🌳 No finalized state is available. 
Reading config from genesis"); + client.usage_info().chain.genesis_hash }; - let slot_worker = sc_consensus_slots::start_slot_worker( - sassafras_link.config.slot_duration(), - select_chain.clone(), - sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), - sync_oracle, - create_inherent_data_providers, - can_author_with, - ); - - let tickets_worker = tickets_worker( - client.clone(), - keystore, - sassafras_link.epoch_changes.clone(), - select_chain, - ); - - let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); - - Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) -} - -async fn tickets_worker( - client: Arc, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - select_chain: SC, -) where - B: BlockT, - C: BlockchainEvents + ProvideRuntimeApi, - C::Api: SassafrasApi, - SC: SelectChain + 'static, -{ - let mut notifications = client.import_notification_stream(); - while let Some(notification) = notifications.next().await { - let epoch_desc = match find_next_epoch_digest::(¬ification.header) { - Ok(Some(epoch_desc)) => epoch_desc, - Err(err) => { - warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); - continue - }, - _ => continue, - }; - - debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); - - let number = *notification.header.number(); - let position = if number == One::one() { - EpochIdentifierPosition::Genesis1 - } else { - EpochIdentifierPosition::Regular - }; - let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; - - let attempts = sp_consensus_sassafras::TICKET_MAX_ATTEMPTS; - let redundancy = sp_consensus_sassafras::TICKET_REDUNDANCY_FACTOR; - let tickets = epoch_changes - .shared_data() - .epoch_mut(&epoch_identifier) - .map(|epoch| authorship::generate_epoch_tickets(epoch, attempts, redundancy, &keystore)) - .unwrap_or_default(); - - if tickets.is_empty() { - continue - } - - // Get the best block on which 
we will build and send the tickets. - let best_id = match select_chain.best_chain().await { - Ok(header) => BlockId::Hash(header.hash()), - Err(err) => { - error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); - continue - }, - }; - - let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { - Err(err) => Some(err.to_string()), - Ok(false) => Some("Unknown reason".to_string()), - _ => None, - }; - if let Some(err) = err { - error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); - // Remove tickets from epoch tree node. - epoch_changes - .shared_data() - .epoch_mut(&epoch_identifier) - .map(|epoch| epoch.tickets_info.clear()); - } - } -} - -/// Worker for Sassafras which implements `Future`. This must be polled. -pub struct SassafrasWorker { - inner: Pin + Send + 'static>>, - slot_notification_sinks: SlotNotificationSinks, -} - -impl SassafrasWorker { - /// Return an event stream of notifications for when new slot happens, and the corresponding - /// epoch descriptor. - pub fn slot_notification_stream( - &self, - ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { - const CHANNEL_BUFFER_SIZE: usize = 1024; - - let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); - self.slot_notification_sinks.lock().push(sink); - stream - } -} - -impl Future for SassafrasWorker { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.inner.as_mut().poll(cx) - } + let config = client.runtime_api().configuration(&BlockId::Hash(hash))?; + Ok(config) } -/// Slot notification sinks. 
-type SlotNotificationSinks = Arc< - Mutex::Hash, NumberFor, Epoch>)>>>, ->; - -struct SassafrasSlotWorker { - client: Arc, - block_import: I, - env: E, - sync_oracle: SO, - justification_sync_link: L, - force_authoring: bool, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - slot_notification_sinks: SlotNotificationSinks, - config: Config, +/// Intermediate value passed to block importer from authoring or validation logic. +pub struct SassafrasIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } -#[async_trait::async_trait] -impl sc_consensus_slots::SimpleSlotWorker - for SassafrasSlotWorker -where - B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, - C::Api: SassafrasApi, - E: Environment + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Clone + Sync, - L: sc_consensus::JustificationSyncLink, - ER: std::error::Error + Send + 'static, -{ - type EpochData = ViableEpochDescriptor, Epoch>; - type Claim = (PreDigest, AuthorityId); - type SyncOracle = SO; - type JustificationSyncLink = L; - type CreateProposer = - Pin> + Send + 'static>>; - type Proposer = E::Proposer; - type BlockImport = I; - - fn logging_target(&self) -> &'static str { - "sassafras" - } - - fn block_import(&mut self) -> &mut Self::BlockImport { - &mut self.block_import - } - - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { - self.epoch_changes - .shared_data() - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - *parent.number(), - slot, - ) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) - } - - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .map(|epoch| epoch.as_ref().authorities.len()) - } - - async fn claim_slot( - &self, - parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) -> Option { - debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); - - // Get the next slot ticket from the runtime. - let block_id = BlockId::Hash(parent_header.hash()); - let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; - - // TODO-SASS-P2 - debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); - - let claim = authorship::claim_slot( - slot, - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - })? - .as_ref(), - ticket, - &self.keystore, - ); - - if claim.is_some() { - debug!(target: "sassafras", "🌳 Claimed slot {}", slot); - } - claim - } - - fn notify_slot( - &self, - _parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } - }); - } - - fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { - vec![::sassafras_pre_digest(claim.0.clone())] - } - - async fn block_import_params( - &self, - header: B::Header, - header_hash: &B::Hash, - body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, - (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, - ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - 
sp_consensus::Error, - > { - // Sign the pre-sealed hash of the block and then add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - ::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), - ) - })?; - let signature: AuthoritySignature = signature - .clone() - .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = ::sassafras_seal(signature); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - - Ok(import_block) - } - - fn force_authoring(&self) -> bool { - self.force_authoring - } - - fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { - // TODO-SASS-P2 - false - } - - fn sync_oracle(&mut self) -> &mut Self::SyncOracle { - &mut self.sync_oracle - } - - fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { - &mut self.justification_sync_link - } - - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin( - self.env - .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), - ) - } - - fn telemetry(&self) -> Option { - // TODO-SASS-P2 - None - } - - fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); - - // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' - let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); - - sc_consensus_slots::proposing_remaining_duration( - parent_slot, - slot_info, - &block_proposal_slot_portion, - None, - sc_consensus_slots::SlotLenienceType::Exponential, - self.logging_target(), - ) - } -} +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; /// Extract the Sassafras pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. -pub fn find_pre_digest(header: &B::Header) -> Result> { +fn find_pre_digest(header: &B::Header) -> Result> { // Genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { @@ -788,639 +327,17 @@ fn find_next_epoch_digest( /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct SassafrasLink { + /// Epoch changes tree epoch_changes: SharedEpochChanges, - config: Config, + /// Startup configuration. Read from runtime at last finalized block. + genesis_config: SassafrasConfiguration, } impl SassafrasLink { - /// Get the epoch changes of this link. - pub fn epoch_changes(&self) -> &SharedEpochChanges { - &self.epoch_changes - } - /// Get the config of this link. - pub fn config(&self) -> &Config { - &self.config - } -} - -/// A verifier for Sassafras blocks. 
-pub struct SassafrasVerifier { - client: Arc, - select_chain: SelectChain, - create_inherent_data_providers: CIDP, - config: Config, - epoch_changes: SharedEpochChanges, - can_author_with: CAW, - telemetry: Option, -} - -impl SassafrasVerifier -where - Block: BlockT, - Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith, - CIDP: CreateInherentDataProviders, -{ - async fn check_inherents( - &self, - block: Block, - block_id: BlockId, - inherent_data: InherentData, - create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, - ) -> Result<(), Error> { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "sassafras", - "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - - let inherent_res = self - .client - .runtime_api() - .check_inherents_with_context(&block_id, execution_context, block, inherent_data) - .map_err(Error::RuntimeApi)?; - - if !inherent_res.ok() { - for (i, e) in inherent_res.into_errors() { - match create_inherent_data_providers.try_handle_error(&i, &e).await { - Some(res) => res.map_err(|e| Error::CheckInherents(e))?, - None => return Err(Error::CheckInherentsUnhandled(i)), - } - } - } - - Ok(()) - } - - async fn check_and_report_equivocation( - &self, - slot_now: Slot, - slot: Slot, - header: &Block::Header, - author: &AuthorityId, - origin: &BlockOrigin, - ) -> Result<(), Error> { - // Don't report any equivocations during initial sync as they are most likely stale. - if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()) - } - - // Check if authorship of this header is an equivocation and return a proof if so. - let equivocation_proof = - match check_equivocation(&*self.client, slot_now, slot, header, author) - .map_err(Error::Client)? 
- { - Some(proof) => proof, - None => return Ok(()), - }; - - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - slot, - equivocation_proof.first_header.hash(), - equivocation_proof.second_header.hash(), - ); - - // Get the best block on which we will build and send the equivocation report. - let _best_id: BlockId = self - .select_chain - .best_chain() - .await - .map(|h| BlockId::Hash(h.hash())) - .map_err(|e| Error::Client(e.into()))?; - - // TODO-SASS-P2 - - Ok(()) - } -} - -type BlockVerificationResult = - Result<(BlockImportParams, Option)>>), String>; - -#[async_trait::async_trait] -impl Verifier - for SassafrasVerifier -where - Block: BlockT, - Client: HeaderMetadata - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + AuxStore, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith + Send + Sync, - CIDP: CreateInherentDataProviders + Send + Sync, - CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, -{ - async fn verify( - &mut self, - mut block: BlockImportParams, - ) -> BlockVerificationResult { - trace!( - target: "sassafras", - "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", - block.origin, - block.header, - block.justifications, - block.body, - ); - - if block.with_state() { - // When importing whole state we don't calculate epoch descriptor, but rather - // read it from the state after import. We also skip all verifications - // because there's no parent state and we trust the sync module to verify - // that the state is correct and finalized. 
- return Ok((block, Default::default())) - } - - trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); - - let hash = block.header.hash(); - let parent_hash = *block.header.parent_hash(); - - let create_inherent_data_providers = self - .create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await - .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; - - let slot_now = create_inherent_data_providers.slot(); - - let parent_header_metadata = self - .client - .header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; - - let pre_digest = find_pre_digest::(&block.header)?; - - let (check_header, epoch_descriptor) = { - let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot, - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? 
- .ok_or(Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or(Error::::FetchEpoch(parent_hash))?; - - let ticket = self - .client - .runtime_api() - .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) - .map_err(|err| err.to_string())?; - - let v_params = verification::VerificationParams { - header: block.header.clone(), - pre_digest, - slot_now, - epoch: viable_epoch.as_ref(), - ticket, - }; - - (verification::check_header::(v_params)?, epoch_descriptor) - }; - - match check_header { - CheckedHeader::Checked(pre_header, verified_info) => { - let sassafras_pre_digest = verified_info - .pre_digest - .as_sassafras_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - let slot = sassafras_pre_digest.slot; - - // The header is valid but let's check if there was something else already - // proposed at the same slot by the given author. If there was, we will - // report the equivocation to the runtime. - if let Err(err) = self - .check_and_report_equivocation( - slot_now, - slot, - &block.header, - &verified_info.author, - &block.origin, - ) - .await - { - warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); - } - - // If the body is passed through, we need to use the runtime to check that the - // internally-set timestamp in the inherents actually matches the slot set in the - // seal. 
- if let Some(inner_body) = block.body { - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(slot); - let new_block = Block::new(pre_header.clone(), inner_body); - - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; - - let (_, inner_body) = new_block.deconstruct(); - block.body = Some(inner_body); - } - - trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); - telemetry!( - self.telemetry; - CONSENSUS_TRACE; - "sassafras.checked_and_importing"; - "pre_header" => ?pre_header, - ); - - block.header = pre_header; - block.post_digests.push(verified_info.seal); - block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - block.post_hash = Some(hash); - - Ok((block, Default::default())) - }, - CheckedHeader::Deferred(a, b) => { - debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!( - self.telemetry; - CONSENSUS_DEBUG; - "sassafras.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(Error::::TooFarInFuture(hash).into()) - }, - } - } -} - -/// A block-import handler for Sassafras. -/// -/// This scans each imported block for epoch change announcements. The announcements are -/// tracked in a tree (of all forks), and the import logic validates all epoch change -/// transitions, i.e. whether a given epoch change is expected or whether it is missing. -/// -/// The epoch change tree should be pruned as blocks are finalized. 
-pub struct SassafrasBlockImport { - inner: I, - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, -} - -impl Clone for SassafrasBlockImport { - fn clone(&self) -> Self { - SassafrasBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - epoch_changes: self.epoch_changes.clone(), - config: self.config.clone(), - } - } -} - -impl SassafrasBlockImport { - fn new( - client: Arc, - epoch_changes: SharedEpochChanges, - block_import: I, - config: Config, - ) -> Self { - SassafrasBlockImport { client, inner: block_import, epoch_changes, config } - } -} - -#[async_trait::async_trait] -impl BlockImport for SassafrasBlockImport -where - Block: BlockT, - Inner: BlockImport> + Send + Sync, - Inner::Error: Into, - Client: HeaderBackend - + HeaderMetadata - + AuxStore - + ProvideRuntimeApi - + Send - + Sync, - Client::Api: SassafrasApi + ApiExt, -{ - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - async fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let number = *block.header.number(); - - let pre_digest = find_pre_digest::(&block.header).expect( - "valid sassafras headers must contain a predigest; header has been already verified; qed", - ); - let slot = pre_digest.slot; - - let parent_hash = *block.header.parent_hash(); - let parent_header = self - .client - .header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| { - ConsensusError::ChainLookup( - sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), - ) - })?; - - let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( - "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ - header has already been verified; qed", - ); - - // Make sure that slot number is strictly increasing - if slot <= parent_slot { - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), - )) - } - - // If there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; - - // Use an extra scope to make the compiler happy, because otherwise he complains about the - // mutex, even if we dropped it... - let mut epoch_changes = { - let mut epoch_changes = self.epoch_changes.shared_data_locked(); - - // Check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ClientImport( - sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) - .into(), - ) - })? - }; - - let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - - let added_weight = pre_digest.ticket_info.is_some() as u32; - let total_weight = parent_weight + added_weight; - - // Search for this all the time so we can reject unexpected announcements. 
- let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some()) { - (true, false) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )), - (false, true) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::UnexpectedEpochChange).into(), - )), - _ => (), - } - - let info = self.client.info(); - - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some((*epoch_changes).clone()); - - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; - - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; - - log!(target: "sassafras", - log_level, - "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); - - let next_epoch = viable_epoch.increment(next_epoch_descriptor); - - log!(target: "sassafras", - log_level, - "🌳 🍁 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); - - // Prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. 
- let prune_and_import = || { - prune_finalized(self.client.clone(), &mut epoch_changes)?; - - epoch_changes - .import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ) - .map_err(|e| { - ConsensusError::ClientImport(format!( - "Error importing epoch changes: {}", - e - )) - })?; - - Ok(()) - }; - - if let Err(e) = prune_and_import() { - debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); - *epoch_changes = - old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e) - } - - aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { - block - .auxiliary - .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - } - - aux_schema::write_block_weight(hash, total_weight, |values| { - block - .auxiliary - .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ChainLookup( - "No block weight for parent header.".to_string(), - ) - })? 
- }; - - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) - }; - // Release the mutex, but it stays locked - epoch_changes.release_mutex() - }; - - let import_result = self.inner.import_block(block, new_cache).await; - - // Revert to the original epoch changes in case there's an error - // importing the block - if import_result.is_err() { - if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes.upgrade() = old_epoch_changes; - } - } - - import_result.map_err(Into::into) - } - - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).await.map_err(Into::into) - } -} - -/// Gets the best finalized block and its slot, and prunes the given epoch tree. -fn prune_finalized( - client: Arc, - epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> -where - B: BlockT, - C: HeaderBackend + HeaderMetadata, -{ - let info = client.info(); - if info.block_gap.is_none() { - epoch_changes.clear_gap(); + pub fn genesis_config(&self) -> &SassafrasConfiguration { + &self.genesis_config } - - let finalized_slot = { - let finalized_header = client - .header(BlockId::Hash(info.finalized_hash)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect( - "best finalized hash was given by client; finalized headers must exist in db; qed", - ); - - find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; valid blocks have a pre-digest; qed") - .slot - }; - - epoch_changes - .prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - Ok(()) -} - -/// Produce a Sassafras block-import object to be used later on in the construction of -/// an import-queue. 
-/// -/// Also returns a link object used to correctly instantiate the import queue -/// and background worker. -pub fn block_import( - config: Config, - wrapped_block_import: I, - client: Arc, -) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> -where - C: AuxStore + HeaderBackend + HeaderMetadata + 'static, -{ - let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; - - let link = SassafrasLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; - - // NOTE: this isn't entirely necessary, but since we didn't use to prune the - // epoch tree it is useful as a migration, so that nodes prune long trees on - // startup rather than waiting until importing the next epoch change block. - prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; - - let import = SassafrasBlockImport::new(client, epoch_changes, wrapped_block_import, config); - - Ok((import, link)) } /// Start an import queue for the Sassafras consensus algorithm. @@ -1431,9 +348,9 @@ where /// /// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, /// otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( sassafras_link: SassafrasLink, - block_import: Inner, + block_import: BI, justification_import: Option>, client: Arc, select_chain: SelectChain, @@ -1444,13 +361,6 @@ pub fn import_queue( telemetry: Option, ) -> ClientResult> where - Inner: BlockImport< - Block, - Error = ConsensusError, - Transaction = sp_api::TransactionFor, - > + Send - + Sync - + 'static, Client: ProvideRuntimeApi + HeaderBackend + HeaderMetadata @@ -1459,20 +369,27 @@ where + Sync + 'static, Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, + BI: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = SassafrasVerifier { + let verifier = SassafrasVerifier::new( + client, select_chain, create_inherent_data_providers, - config: sassafras_link.config, - epoch_changes: sassafras_link.epoch_changes, + sassafras_link.epoch_changes, can_author_with, telemetry, - client, - }; + sassafras_link.genesis_config, + ); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index a58caf374ae68..3900a8b8798e9 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -16,17 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Verification for Sassafras headers. 
- -use super::{authorship, sassafras_err, BlockT, Epoch, Error}; -use sc_consensus_slots::CheckedHeader; -use sp_consensus_sassafras::{ - digests::{CompatibleDigestItem, PreDigest}, - make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, Ticket, -}; -use sp_consensus_slots::Slot; -use sp_core::{ByteArray, Pair}; -use sp_runtime::{traits::Header, DigestItem}; +//! Types and functions related to block verification. + +use super::*; // Allowed slot drift. const MAX_SLOT_DRIFT: u64 = 1; @@ -100,11 +92,8 @@ pub fn check_header( // TODO-SASS-P2 ... we can eventually remove auth index from ticket info log::error!(target: "sassafras", "🌳 Wrong primary authority index"); } - let transcript = make_ticket_transcript( - &epoch.randomness, - ticket_info.attempt as u64, - epoch.epoch_index, - ); + let transcript = + make_ticket_transcript(&epoch.randomness, ticket_info.attempt, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; @@ -143,3 +132,300 @@ pub fn check_header( Ok(CheckedHeader::Checked(header, info)) } + +/// A verifier for Sassafras blocks. +pub struct SassafrasVerifier { + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, +} + +impl + SassafrasVerifier +{ + /// Constructor. 
+ pub fn new( + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, + ) -> Self { + SassafrasVerifier { + client, + select_chain, + create_inherent_data_providers, + epoch_changes, + can_author_with, + telemetry, + genesis_config, + } + } +} + +impl SassafrasVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, +{ + async fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "sassafras", + "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(Error::RuntimeApi)?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // Don't report any equivocations during initial sync as they are most likely stale. 
+ if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // Check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // Get the best block on which we will build and send the equivocation report. + let _best_id: BlockId = self + .select_chain + .best_chain() + .await + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // TODO-SASS-P2 + + Ok(()) + } +} + +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; + +#[async_trait::async_trait] +impl Verifier + for SassafrasVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> BlockVerificationResult { + trace!( + target: "sassafras", + "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, + ); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } + + trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); + + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&block.header)?; + + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or(Error::::FetchEpoch(parent_hash))?; + + let ticket = self + .client + .runtime_api() + .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) + .map_err(|err| err.to_string())?; + + let v_params = VerificationParams { + header: block.header.clone(), + pre_digest, + slot_now, + epoch: viable_epoch.as_ref(), + ticket, + }; + + (check_header::(v_params)?, epoch_descriptor) + }; + + match check_header { + CheckedHeader::Checked(pre_header, verified_info) => { + let sassafras_pre_digest = verified_info + .pre_digest + .as_sassafras_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + let slot = sassafras_pre_digest.slot; + + // The header is valid but let's check if there was something else already + // proposed at the same slot by the given author. 
If there was, we will + // report the equivocation to the runtime. + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &block.header, + &verified_info.author, + &block.origin, + ) + .await + { + warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. + if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(slot); + let new_block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + block.post_hash = Some(hash); + + Ok((block, Default::default())) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 245b9fb783813..8b1da6dc98e45 100644 --- a/frame/sassafras/src/lib.rs +++ 
b/frame/sassafras/src/lib.rs @@ -203,11 +203,25 @@ pub mod pallet { #[pallet::getter(fn initialized)] pub type Initialized = StorageValue<_, PreDigest>; - /// The configuration for the current epoch. Should never be `None` as it is initialized in - /// genesis. + /// The configuration for the current epoch. #[pallet::storage] + #[pallet::getter(fn config)] pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>; + /// The configuration for the next epoch. + #[pallet::storage] + pub type NextEpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// TODO-SASS-P2: better doc? Double check if next epoch tickets were computed using NextEpoch + /// params in the native code. + /// In other words a config change submitted during session N will be enacted on session N+2. + /// This is to maintain coherence for already submitted tickets for epoch N+1 that were + /// computed using configuration parameters stored for session N+1. + #[pallet::storage] + pub(super) type PendingEpochConfigChange = StorageValue<_, SassafrasEpochConfiguration>; + /// Stored tickets metadata. #[pallet::storage] pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; @@ -300,6 +314,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Submit next epoch tickets. + /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? #[pallet::weight(10_000)] pub fn submit_tickets( origin: OriginFor, @@ -317,6 +332,24 @@ pub mod pallet { TicketsMeta::::set(metadata); Ok(()) } + + /// Plan an epoch config change. The epoch config change is recorded and will be enacted on + /// the next call to `enact_epoch_change`. The config will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. 
+ #[pallet::weight(10_000)] + pub fn plan_config_change( + origin: OriginFor, + config: SassafrasEpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); + Ok(()) + } } #[pallet::validate_unsigned] @@ -365,10 +398,11 @@ pub mod pallet { // Check tickets are below threshold let next_auth = NextAuthorities::::get(); + let epoch_config = EpochConfig::::get(); let threshold = sp_consensus_sassafras::compute_threshold( - sp_consensus_sassafras::TICKET_REDUNDANCY_FACTOR, + epoch_config.redundancy_factor, epoch_duration as u32, - sp_consensus_sassafras::TICKET_MAX_ATTEMPTS, + epoch_config.attempts_number, next_auth.len() as u32, ); @@ -488,25 +522,24 @@ impl Pallet { // Updates current epoch randomness and computes the *next* epoch randomness. let next_randomness = Self::update_randomness(next_epoch_index); + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config.clone() { + NextEpochConfig::::put(next_config); + } + // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. 
let next_epoch = NextEpochDescriptor { authorities: next_authorities.to_vec(), randomness: next_randomness, + config: next_config, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - // if let Some(next_config) = NextEpochConfig::::get() { - // EpochConfig::::put(next_config); - // } - - // if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { - // let next_epoch_config: BabeEpochConfiguration = - // pending_epoch_config_change.clone().into(); - // NextEpochConfig::::put(next_epoch_config); - // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); - // } - let epoch_key = (epoch_idx & 1) as u8; let mut tickets_metadata = TicketsMeta::::get(); // Optionally finish sorting @@ -585,6 +618,7 @@ impl Pallet { let next = NextEpochDescriptor { authorities: Self::authorities().to_vec(), randomness: Self::randomness(), + config: None, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 4aa79a2ef23f7..1648a8ff4f230 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -156,6 +156,7 @@ fn on_first_block_after_genesis() { sp_consensus_sassafras::digests::NextEpochDescriptor { authorities: NextAuthorities::::get().to_vec(), randomness: NextRandomness::::get(), + config: None, }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 172cbf2d800f7..eb318a5caa379 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Private implementation details of Sassafras digests. 
use super::{ - AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, Slot, TicketInfo, - SASSAFRAS_ENGINE_ID, + AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, Slot, TicketInfo, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -46,12 +46,14 @@ pub struct PreDigest { /// Information about the next epoch. This is broadcast in the first block /// of the epoch. -#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { /// The authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// The value of randomness to use for the slot-assignment. pub randomness: Randomness, + /// Algorithm parameters. If not present, previous epoch parameters are used. + pub config: Option, } /// An consensus log item for BABE. diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 11206ed17d80d..67df5f4180ca1 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -83,34 +83,32 @@ pub type SassafrasBlockWeight = u32; /// Configuration data used by the Sassafras consensus engine. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] -pub struct SassafrasGenesisConfiguration { +pub struct SassafrasConfiguration { /// The slot duration in milliseconds for Sassafras. pub slot_duration: u64, /// The duration of epochs in slots. pub epoch_length: u64, /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// The randomness for the genesis epoch. pub randomness: Randomness, } +impl SassafrasConfiguration { + /// Get the slot duration defined in the genesis configuration. 
+ pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.slot_duration) + } +} + /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct SassafrasEpochConfiguration { - // TODO-SASS-P2 - // x: redundancy_factor - // a: attempts number - // L: bound on aa number of tickets that can be gossiped -} - -// Sensible defaults for Sassafras epoch configuration. -impl Default for SassafrasEpochConfiguration { - fn default() -> Self { - SassafrasEpochConfiguration { - // TODO-SASS-P2 - } - } + /// Redundancy factor. + pub redundancy_factor: u32, + /// Number of attempts for tickets generation. + pub attempts_number: u32, } /// Ticket type. @@ -196,7 +194,7 @@ sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. - fn configuration() -> SassafrasGenesisConfiguration; + fn configuration() -> SassafrasConfiguration; /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. 
@@ -230,9 +228,3 @@ pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { U256::from(ticket.as_bytes()) < threshold } - -/// TODO-SASS-P3: add to session config -pub const TICKET_MAX_ATTEMPTS: u32 = 30; - -/// TODO-SASS-P3: add to session config -pub const TICKET_REDUNDANCY_FACTOR: u32 = 1; From a7fb94aacf45d5eca8534776da44a614fb03261d Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 13:41:35 +0200 Subject: [PATCH 10/14] Use ticket params found in the epoch descriptior --- Cargo.lock | 1 - bin/node-sassafras/runtime/src/lib.rs | 1 + client/consensus/sassafras/src/authorship.rs | 35 +++--- .../consensus/sassafras/src/block_import.rs | 5 +- client/consensus/sassafras/src/lib.rs | 29 ++--- frame/sassafras/Cargo.toml | 2 - frame/sassafras/src/lib.rs | 59 +++------- primitives/consensus/sassafras/src/lib.rs | 111 ++++-------------- primitives/consensus/sassafras/src/vrf.rs | 92 +++++++++++++++ 9 files changed, 162 insertions(+), 173 deletions(-) create mode 100644 primitives/consensus/sassafras/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index 579f7b815984e..52c54865cfeae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6260,7 +6260,6 @@ dependencies = [ "scale-info", "sp-application-crypto", "sp-consensus-sassafras", - "sp-consensus-vrf", "sp-core", "sp-io", "sp-runtime", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 69e66c1c198d9..0bedf09e08794 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -405,6 +405,7 @@ impl_runtime_apis! 
{ epoch_length: EpochDuration::get(), authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), + threshold_params: Sassafras::config(), } } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 7d7059022ec6b..ab280908019da 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -21,21 +21,21 @@ use super::*; use sp_consensus_sassafras::{ - digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, - Ticket, TicketInfo, + digests::PreDigest, + vrf::{make_slot_transcript_data, make_ticket_transcript_data}, + AuthorityId, Slot, Ticket, TicketInfo, }; use sp_core::{twox_64, ByteArray}; /// Get secondary authority index for the given epoch and slot. -#[inline] -pub fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { +pub(crate) fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) % epoch.authorities.len() as u64 } /// Try to claim an epoch slot. /// If ticket is `None`, then the slot should be claimed using the fallback mechanism. 
-pub fn claim_slot( +fn claim_slot( slot: Slot, epoch: &Epoch, ticket: Option, @@ -59,23 +59,24 @@ pub fn claim_slot( let authority_id = epoch.authorities.get(authority_index as usize).map(|auth| &auth.0)?; let transcript_data = make_slot_transcript_data(&epoch.randomness, slot, epoch.epoch_index); - let result = SyncCryptoStore::sr25519_vrf_sign( + let signature = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, authority_id.as_ref(), transcript_data, - ); + ) + .ok() + .flatten()?; + + let pre_digest = PreDigest { + authority_index: authority_index as u32, + slot, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof.clone()), + ticket_info, + }; - result.ok().flatten().map(|signature| { - let pre_digest = PreDigest { - authority_index: authority_index as u32, - slot, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof.clone()), - ticket_info, - }; - (pre_digest, authority_id.clone()) - }) + Some((pre_digest, authority_id.clone())) } /// Generate the tickets for the given epoch. 
diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index dddeb2155c0a3..3630589aeb46a 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -164,7 +164,7 @@ where let info = self.client.info(); - if let Some(mut next_epoch_descriptor) = next_epoch_digest { + if let Some(next_epoch_descriptor) = next_epoch_digest { old_epoch_changes = Some((*epoch_changes).clone()); let viable_epoch = epoch_changes @@ -191,9 +191,6 @@ where viable_epoch.as_ref().start_slot, ); - if next_epoch_descriptor.config.is_none() { - next_epoch_descriptor.config = Some(viable_epoch.as_ref().config.clone()); - } let next_epoch = viable_epoch.increment(next_epoch_descriptor); log!(target: "sassafras", diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index b62baa6618e9c..4f08d84862c18 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -85,10 +85,10 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, - make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, AuthoritySignature, - SassafrasApi, SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, - Ticket, TicketInfo, VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, - VRF_PROOF_LENGTH, + vrf::{make_slot_transcript, make_ticket_transcript}, + AuthorityId, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, + SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, TicketInfo, VRFOutput, VRFProof, + SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; mod authorship; @@ -101,7 +101,6 @@ pub use block_import::{block_import, SassafrasBlockImport}; pub use verification::SassafrasVerifier; /// Errors encountered by the Sassafras routines. 
-/// TODO-SASS-P2: remove unused errors. #[derive(Debug, thiserror::Error)] pub enum Error { /// Multiple Sassafras pre-runtime digests @@ -217,7 +216,7 @@ impl EpochT for Epoch { duration: self.duration, authorities: descriptor.authorities, randomness: descriptor.randomness, - config: descriptor.config.expect("configuration should have been set"), + config: descriptor.config.unwrap_or(self.config.clone()), tickets_info: BTreeMap::new(), } } @@ -241,26 +240,24 @@ impl Epoch { duration: config.epoch_length, authorities: config.authorities.clone(), randomness: config.randomness, - config: SassafrasEpochConfiguration::default(), + config: config.threshold_params.clone(), tickets_info: BTreeMap::new(), } } } -/// TODO-SASS-P2 +/// Read latest finalized protocol configuration. pub fn configuration(client: &C) -> ClientResult where B: BlockT, - // TODO-SASS-P2: we require all these bunds? - C: AuxStore + ProvideRuntimeApi + UsageProvider, + C: ProvideRuntimeApi + UsageProvider, C::Api: SassafrasApi, { - let hash = if client.usage_info().chain.finalized_state.is_some() { - client.usage_info().chain.best_hash - } else { - debug!(target: "sassafras", "🌳 No finalized state is available. Reading config from genesis"); - client.usage_info().chain.genesis_hash - }; + let info = client.usage_info().chain; + let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| { + debug!(target: "sassafras", "🌳 Reading config from genesis"); + info.genesis_hash + }); let config = client.runtime_api().configuration(&BlockId::Hash(hash))?; Ok(config) diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index b41803cd3bbef..fc0c1940cc50d 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -23,7 +23,6 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. 
scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -46,7 +45,6 @@ std = [ "scale-info/std", "sp-application-crypto/std", "sp-consensus-sassafras/std", - "sp-consensus-vrf/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 8b1da6dc98e45..1590c88d5d65d 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -52,8 +52,13 @@ use scale_info::TypeInfo; use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; -use sp_application_crypto::ByteArray; -use sp_consensus_vrf::schnorrkel; +//use sp_application_crypto::ByteArray; +//use sp_consensus_vrf::schnorrkel; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, + AuthorityId, Randomness, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, + SASSAFRAS_ENGINE_ID, +}; use sp_runtime::{ generic::DigestItem, traits::{One, Saturating}, @@ -61,12 +66,6 @@ use sp_runtime::{ }; use sp_std::prelude::Vec; -pub use sp_consensus_sassafras::{ - digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, - PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, 
VRF_OUTPUT_LENGTH, -}; - #[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[cfg(all(feature = "std", test))] @@ -187,15 +186,15 @@ pub mod pallet { /// adversary, for purposes such as public-coin zero-knowledge proofs. #[pallet::storage] #[pallet::getter(fn randomness)] - pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; /// Next epoch randomness. #[pallet::storage] - pub type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; /// Randomness accumulator. #[pallet::storage] - pub type RandomnessAccumulator = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. @@ -275,39 +274,9 @@ pub mod pallet { // this block into the randomness accumulator. If we've determined // that this block was the first in a new epoch, the changeover logic has // already occurred at this point, so the - // - // TODO-SASS-P2 - // under-construction randomness - // will only contain outputs from the right epoch. let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - - let randomness = Authorities::::get() - .get(pre_digest.authority_index as usize) - .and_then(|(authority, _)| { - schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() - }) - .and_then(|pubkey| { - let transcript = sp_consensus_sassafras::make_slot_transcript( - &Self::randomness(), - Self::current_slot(), - EpochIndex::::get(), - ); - - // This has already been verified by the client on block import. 
- debug_assert!(pubkey - .vrf_verify( - transcript.clone(), - &pre_digest.vrf_output, - &pre_digest.vrf_proof - ) - .is_ok()); - - Some(pre_digest.vrf_output.to_bytes()) - }) - .expect("Pre-digest contains valid randomness; qed"); - - Self::deposit_randomness(&randomness); + Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); } } @@ -554,9 +523,9 @@ impl Pallet { /// Call this function on epoch change to update the randomness. /// Returns the next epoch randomness. - fn update_randomness(next_epoch_index: u64) -> schnorrkel::Randomness { + fn update_randomness(next_epoch_index: u64) -> Randomness { let curr_randomness = NextRandomness::::get(); - Randomness::::put(curr_randomness); + CurrentRandomness::::put(curr_randomness); let accumulator = RandomnessAccumulator::::get(); let mut s = Vec::with_capacity(2 * curr_randomness.len() + 8); @@ -590,7 +559,7 @@ impl Pallet { >::deposit_log(log) } - fn deposit_randomness(randomness: &schnorrkel::Randomness) { + fn deposit_randomness(randomness: &Randomness) { let mut s = RandomnessAccumulator::::get().to_vec(); s.extend_from_slice(randomness); let accumulator = sp_io::hashing::blake2_256(&s); diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 67df5f4180ca1..e9bf510fc3cd6 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -22,35 +22,32 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -pub use merlin::Transcript; - use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; use sp_core::{crypto, U256}; -#[cfg(feature = "std")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, 
VRFInOut, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, + PublicKey, Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; -/// Key type for Sassafras module. -pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; - pub mod digests; pub mod inherents; +pub mod vrf; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; app_crypto!(sr25519, SASSAFRAS); } +/// Key type for Sassafras protocol. +pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + /// The index of an authority. pub type AuthorityIndex = u32; @@ -92,6 +89,8 @@ pub struct SassafrasConfiguration { pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// The randomness for the genesis epoch. pub randomness: Randomness, + /// Threshold params + pub threshold_params: SassafrasEpochConfiguration, } impl SassafrasConfiguration { @@ -125,86 +124,6 @@ pub struct TicketInfo { pub proof: VRFProof, } -const TYPE_LABEL: &str = "type"; -const EPOCH_LABEL: &str = "epoch"; -const SLOT_LABEL: &str = "slot"; -const ATTEMPT_LABEL: &str = "slot"; -const RANDOMNESS_LABEL: &str = "randomness"; - -const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; -const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; - -/// Make slot VRF transcript. -pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); - transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); - transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); - transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); - transcript -} - -/// Make slot VRF transcript data container. 
-#[cfg(feature = "std")] -pub fn make_slot_transcript_data( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), - (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), - (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), - (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} - -/// Make ticket VRF transcript. -pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); - transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); - transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); - transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); - transcript -} - -/// Make ticket VRF transcript data container. -#[cfg(feature = "std")] -pub fn make_ticket_transcript_data( - randomness: &Randomness, - attempt: u32, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), - (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), - (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), - (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} - -sp_api::decl_runtime_apis! { - /// API necessary for block authorship with Sassafras. - pub trait SassafrasApi { - /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. - fn configuration() -> SassafrasConfiguration; - - /// Submit next epoch validator tickets via an unsigned extrinsic. - /// This method returns `false` when creation of the extrinsics fails. 
- fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; - - /// Get expected ticket for the given slot. - fn slot_ticket(slot: Slot) -> Option; - } -} - /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: /// - x: redundancy factor; /// - s: number of slots in epoch; @@ -228,3 +147,19 @@ pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { U256::from(ticket.as_bytes()) < threshold } + +// Runtime API. +sp_api::decl_runtime_apis! { + /// API necessary for block authorship with Sassafras. + pub trait SassafrasApi { + /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. + fn configuration() -> SassafrasConfiguration; + + /// Submit next epoch validator tickets via an unsigned extrinsic. + /// This method returns `false` when creation of the extrinsics fails. + fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; + + /// Get expected ticket for the given slot. + fn slot_ticket(slot: Slot) -> Option; + } +} diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..1c46fe77a6c6e --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives related to VRF input and output. + +pub use merlin::Transcript; + +pub use sp_consensus_slots::Slot; +pub use sp_consensus_vrf::schnorrkel::{PublicKey, Randomness, VRFOutput, VRFProof}; +#[cfg(feature = "std")] +use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; + +use crate::SASSAFRAS_ENGINE_ID; + +const TYPE_LABEL: &str = "type"; +const EPOCH_LABEL: &str = "epoch"; +const SLOT_LABEL: &str = "slot"; +const ATTEMPT_LABEL: &str = "slot"; +const RANDOMNESS_LABEL: &str = "randomness"; + +const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; +const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; + +/// Make slot VRF transcript. +pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make slot VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_slot_transcript_data( + randomness: &Randomness, + slot: Slot, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), + (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} + +/// Make ticket VRF transcript. 
+pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make ticket VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_ticket_transcript_data( + randomness: &Randomness, + attempt: u32, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), + (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} From d0b259cf1b1916414aa39833e7f2fc45f505ec68 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 14:20:06 +0200 Subject: [PATCH 11/14] Configuration sharing between primitives and client code --- bin/node-sassafras/runtime/src/lib.rs | 2 +- client/consensus/sassafras/src/authorship.rs | 28 +++++++++-------- client/consensus/sassafras/src/lib.rs | 31 ++++++++----------- .../consensus/sassafras/src/verification.rs | 9 +++--- primitives/consensus/sassafras/src/lib.rs | 10 +++--- 5 files changed, 39 insertions(+), 41 deletions(-) diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 0bedf09e08794..75dcd204a96d8 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -402,7 +402,7 @@ impl_runtime_apis! 
{ fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { sp_consensus_sassafras::SassafrasConfiguration { slot_duration: Sassafras::slot_duration(), - epoch_length: EpochDuration::get(), + epoch_duration: EpochDuration::get(), authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), threshold_params: Sassafras::config(), diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index ab280908019da..8f1aa1115d2a5 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -28,9 +28,9 @@ use sp_consensus_sassafras::{ use sp_core::{twox_64, ByteArray}; /// Get secondary authority index for the given epoch and slot. -pub(crate) fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { - u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) % - epoch.authorities.len() as u64 +pub(crate) fn secondary_authority_index(slot: Slot, config: &SassafrasConfiguration) -> u64 { + u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) % + config.authorities.len() as u64 } /// Try to claim an epoch slot. 
@@ -41,6 +41,7 @@ fn claim_slot( ticket: Option, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { + let config = &epoch.config; let (authority_index, ticket_info) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); @@ -52,13 +53,13 @@ fn claim_slot( }, None => { log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); - (secondary_authority_index(slot, epoch), None) + (secondary_authority_index(slot, config), None) }, }; - let authority_id = epoch.authorities.get(authority_index as usize).map(|auth| &auth.0)?; + let authority_id = config.authorities.get(authority_index as usize).map(|auth| &auth.0)?; - let transcript_data = make_slot_transcript_data(&epoch.randomness, slot, epoch.epoch_index); + let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_index); let signature = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, @@ -83,20 +84,21 @@ fn claim_slot( /// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` /// structure. The additional information will be used during epoch to claim slots. 
pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { + let config = &epoch.config; + let max_attempts = config.threshold_params.attempts_number; + let redundancy_factor = config.threshold_params.redundancy_factor; let mut tickets = vec![]; - let max_attempts = epoch.config.attempts_number; - let redundancy_factor = epoch.config.redundancy_factor; let threshold = sp_consensus_sassafras::compute_threshold( redundancy_factor, - epoch.duration as u32, + config.epoch_duration as u32, max_attempts, - epoch.authorities.len() as u32, + config.authorities.len() as u32, ); // TODO-SASS-P4 remove me log::debug!(target: "sassafras", "🌳 Tickets threshold: {:032x}", threshold); - let authorities = epoch.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); + let authorities = config.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); for (authority_index, authority_id) in authorities { if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) { @@ -105,7 +107,7 @@ pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) let make_ticket = |attempt| { let transcript_data = - make_ticket_transcript_data(&epoch.randomness, attempt, epoch.epoch_index); + make_ticket_transcript_data(&config.randomness, attempt, epoch.epoch_index); // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. 
@@ -206,7 +208,7 @@ where self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) - .map(|epoch| epoch.as_ref().authorities.len()) + .map(|epoch| epoch.as_ref().config.authorities.len()) } async fn claim_slot( diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4f08d84862c18..4acf984003aaa 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -193,14 +193,8 @@ pub struct Epoch { pub epoch_index: u64, /// The starting slot of the epoch. pub start_slot: Slot, - /// The duration of this epoch in slots. - pub duration: u64, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], - /// Configuration parameters of the epoch. - pub config: SassafrasEpochConfiguration, + /// Epoch configuration + pub config: SassafrasConfiguration, /// Tickets metadata. 
pub tickets_info: BTreeMap, } @@ -210,13 +204,17 @@ impl EpochT for Epoch { type Slot = Slot; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, + let config = SassafrasConfiguration { + slot_duration: self.config.slot_duration, + epoch_duration: self.config.epoch_duration, authorities: descriptor.authorities, randomness: descriptor.randomness, - config: descriptor.config.unwrap_or(self.config.clone()), + threshold_params: descriptor.config.unwrap_or(self.config.threshold_params.clone()), + }; + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.config.slot_duration, + config, tickets_info: BTreeMap::new(), } } @@ -226,7 +224,7 @@ impl EpochT for Epoch { } fn end_slot(&self) -> Slot { - self.start_slot + self.duration + self.start_slot + self.config.slot_duration } } @@ -237,10 +235,7 @@ impl Epoch { Epoch { epoch_index: 0, start_slot: slot, - duration: config.epoch_length, - authorities: config.authorities.clone(), - randomness: config.randomness, - config: config.threshold_params.clone(), + config: config.clone(), tickets_info: BTreeMap::new(), } } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 3900a8b8798e9..b162fe390ef03 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -56,13 +56,14 @@ pub fn check_header( params: VerificationParams, ) -> Result, Error> { let VerificationParams { mut header, pre_digest, slot_now, epoch, ticket } = params; + let config = &epoch.config; // Check that the slot is not in the future, with some drift being allowed. 
if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let author = match epoch.authorities.get(pre_digest.authority_index as usize) { + let author = match config.authorities.get(pre_digest.authority_index as usize) { Some(author) => author.0.clone(), None => return Err(sassafras_err(Error::SlotAuthorNotFound)), }; @@ -93,14 +94,14 @@ pub fn check_header( log::error!(target: "sassafras", "🌳 Wrong primary authority index"); } let transcript = - make_ticket_transcript(&epoch.randomness, ticket_info.attempt, epoch.epoch_index); + make_ticket_transcript(&config.randomness, ticket_info.attempt, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { log::debug!(target: "sassafras", "🌳 checking secondary"); - let idx = authorship::secondary_authority_index(pre_digest.slot, params.epoch); + let idx = authorship::secondary_authority_index(pre_digest.slot, config); if idx != pre_digest.authority_index as u64 { log::error!(target: "sassafras", "🌳 Wrong secondary authority index"); } @@ -119,7 +120,7 @@ pub fn check_header( // Check slot-vrf proof - let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index e9bf510fc3cd6..4754081fbc126 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -81,15 +81,15 @@ pub type SassafrasBlockWeight = u32; /// Configuration data used 
by the Sassafras consensus engine. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] pub struct SassafrasConfiguration { - /// The slot duration in milliseconds for Sassafras. + /// The slot duration in milliseconds. pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: u64, - /// The authorities for the genesis epoch. + pub epoch_duration: u64, + /// The authorities for the epoch. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// The randomness for the genesis epoch. + /// The randomness for the epoch. pub randomness: Randomness, - /// Threshold params + /// Tickets threshold parameters. pub threshold_params: SassafrasEpochConfiguration, } From 64198fe50ab5636da2f4cb1067df4d3929d64fc2 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 18:14:40 +0200 Subject: [PATCH 12/14] Fix next session slot computation --- client/consensus/sassafras/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4acf984003aaa..d81b8788fbae9 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -213,7 +213,7 @@ impl EpochT for Epoch { }; Epoch { epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.config.slot_duration, + start_slot: self.start_slot + config.epoch_duration, config, tickets_info: BTreeMap::new(), } From 5143c4a36bb375f836bd95dfdd19c215778c1e58 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 18:36:01 +0200 Subject: [PATCH 13/14] Integration of session pallet --- Cargo.lock | 1 + bin/node-sassafras/node/Cargo.toml | 12 ++- bin/node-sassafras/node/src/chain_spec.rs | 62 ++++++------ bin/node-sassafras/runtime/Cargo.toml | 3 + bin/node-sassafras/runtime/src/lib.rs | 59 ++++++++--- frame/sassafras/src/lib.rs | 34 ++++--- frame/sassafras/src/session.rs | 114 ++++++++++++++++++++++ 7 files changed, 228 insertions(+), 
57 deletions(-) create mode 100644 frame/sassafras/src/session.rs diff --git a/Cargo.lock b/Cargo.lock index 52c54865cfeae..cf1eb38e6d2fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4936,6 +4936,7 @@ dependencies = [ "pallet-balances", "pallet-grandpa", "pallet-sassafras", + "pallet-session", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index c99e7bf5ef0ed..9133c2141c837 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -66,7 +66,15 @@ substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build [features] default = [] -runtime-benchmarks = ["node-sassafras-runtime/runtime-benchmarks"] +runtime-benchmarks = [ + "node-sassafras-runtime/runtime-benchmarks" +] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. -try-runtime = ["node-sassafras-runtime/try-runtime", "try-runtime-cli"] +try-runtime = [ + "node-sassafras-runtime/try-runtime", + "try-runtime-cli" +] +use-session-pallet = [ + "node-sassafras-runtime/use-session-pallet" +] diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 17fefdee11fb1..965fc197277c8 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -2,6 +2,8 @@ use node_sassafras_runtime::{ AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; +#[cfg(feature = "use-session-pallet")] +use node_sassafras_runtime::{SessionConfig, SessionKeys}; use sc_service::ChainType; use sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; use sp_core::{sr25519, Pair, Public}; @@ -24,7 +26,7 @@ pub fn get_from_seed(seed: &str) -> ::Pu type AccountPublic = ::Signer; -/// Generate an account ID from seed. 
+/// Generate an account id from seed. pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic: From<::Public>, @@ -32,47 +34,40 @@ where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Generate authority keys from seed. -pub fn authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { - (get_from_seed::(s), get_from_seed::(s)) +/// Generate authority account id and keys from seed. +pub fn authority_keys_from_seed(seed: &str) -> (AccountId, SassafrasId, GrandpaId) { + ( + get_account_id_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + ) } pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Development", - // ID "dev", ChainType::Development, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, None, - // Properties None, - // Extensions None, )) } @@ -81,19 +76,14 @@ pub fn local_testnet_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Local Testnet", - // ID "local_testnet", ChainType::Local, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -108,19 +98,13 @@ pub fn local_testnet_config() -> Result { get_account_id_from_seed::("Eve//stash"), 
get_account_id_from_seed::("Ferdie//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, - // Properties None, None, - // Extensions None, )) } @@ -128,10 +112,9 @@ pub fn local_testnet_config() -> Result { /// Configure initial storage state for FRAME modules. fn testnet_genesis( wasm_binary: &[u8], - initial_authorities: Vec<(SassafrasId, GrandpaId)>, + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, - _enable_println: bool, ) -> GenesisConfig { GenesisConfig { system: SystemConfig { @@ -143,19 +126,38 @@ fn testnet_genesis( balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), }, sassafras: SassafrasConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 0)).collect(), epoch_config: SassafrasEpochConfiguration { attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, }, }, grandpa: GrandpaConfig { - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect(), }, sudo: SudoConfig { // Assign network admin rights. 
key: Some(root_key), }, transaction_payment: Default::default(), + #[cfg(feature = "use-session-pallet")] + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + SessionKeys { sassafras: x.1.clone(), grandpa: x.2.clone() }, + ) + }) + .collect::>(), + }, } } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 3bcd35d8b020c..823e1dc2bd4eb 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -17,6 +17,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } @@ -62,6 +63,7 @@ std = [ "pallet-balances/std", "pallet-grandpa/std", "pallet-sudo/std", + "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -99,3 +101,4 @@ try-runtime = [ "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", ] +use-session-pallet = [] diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 75dcd204a96d8..c428931e99dbe 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -8,6 +8,8 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(feature = 
"use-session-pallet")] +use sp_runtime::traits::OpaqueKeys; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, @@ -70,12 +72,12 @@ pub mod opaque { pub type Block = generic::Block; /// Opaque block identifier type. pub type BlockId = generic::BlockId; +} - impl_opaque_keys! { - pub struct SessionKeys { - pub sassafras: Sassafras, - pub grandpa: Grandpa, - } +impl_opaque_keys! { + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, } } @@ -215,6 +217,9 @@ parameter_types! { impl pallet_sassafras::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; + #[cfg(feature = "use-session-pallet")] + type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + #[cfg(not(feature = "use-session-pallet"))] type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; type MaxAuthorities = ConstU32; type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; @@ -236,7 +241,6 @@ impl pallet_grandpa::Config for Runtime { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; @@ -244,13 +248,11 @@ impl pallet_timestamp::Config for Runtime { } impl pallet_balances::Config for Runtime { + type Event = Event; type MaxLocks = ConstU32<50>; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - /// The type for recording an account's balance. type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU128<500>; type AccountStore = System; @@ -271,7 +273,40 @@ impl pallet_sudo::Config for Runtime { type Call = Call; } -// Create the runtime by composing the FRAME pallets that were previously configured. 
+#[cfg(feature = "use-session-pallet")] +impl pallet_session::Config for Runtime { + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); //pallet_staking::StashOf; + type ShouldEndSession = Sassafras; + type NextSessionRotation = Sassafras; + type SessionManager = (); //pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = pallet_session::weights::SubstrateWeight; +} + +// Create a runtime using session pallet +#[cfg(feature = "use-session-pallet")] +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + Session: pallet_session, + } +); + +// Create a runtime NOT using session pallet +#[cfg(not(feature = "use-session-pallet"))] construct_runtime!( pub enum Runtime where Block = Block, @@ -422,13 +457,13 @@ impl_runtime_apis! 
{ impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { - opaque::SessionKeys::generate(seed) + SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec, ) -> Option, KeyTypeId)>> { - opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + SessionKeys::decode_into_raw_public_keys(&encoded) } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 1590c88d5d65d..31678a6199ec7 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -52,8 +52,6 @@ use scale_info::TypeInfo; use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; -//use sp_application_crypto::ByteArray; -//use sp_consensus_vrf::schnorrkel; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, AuthorityId, Randomness, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, @@ -73,9 +71,11 @@ mod mock; #[cfg(all(feature = "std", test))] mod tests; +pub mod session; + pub use pallet::*; -/// Tickets related metadata that are commonly used together. +/// Tickets related metadata that is commonly used together. #[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] pub struct TicketsMetadata { /// Number of tickets available for even and odd session indices respectivelly. @@ -303,7 +303,7 @@ pub mod pallet { } /// Plan an epoch config change. The epoch config change is recorded and will be enacted on - /// the next call to `enact_epoch_change`. The config will be activated one epoch after. + /// the next call to `enact_session_change`. The config will be activated one epoch after. /// Multiple calls to this method will replace any existing planned config change that had /// not been enacted yet. #[pallet::weight(10_000)] @@ -416,7 +416,7 @@ impl Pallet { /// Determine whether an epoch change should take place at this block. 
/// Assumes that initialization has already taken place. - pub fn should_epoch_change(now: T::BlockNumber) -> bool { + pub fn should_end_session(now: T::BlockNumber) -> bool { // The epoch has technically ended during the passage of time between this block and the // last, but we have to "end" the epoch now, since there is no earlier possible block we // could have done it. @@ -444,7 +444,7 @@ impl Pallet { *slot.saturating_sub(Self::current_epoch_start()) } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_session` /// has returned `true`, and the caller is the only caller of this function. /// /// Typically, this is not handled directly by the user, but by higher-level validator-set @@ -454,7 +454,7 @@ impl Pallet { /// If we detect one or more skipped epochs the policy is to use the authorities and values /// from the first skipped epoch. /// Should the tickets be invalidated? Currently they are... see the `get-ticket` method. - pub(crate) fn enact_epoch_change( + pub(crate) fn enact_session_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< (AuthorityId, SassafrasAuthorityWeight), @@ -567,11 +567,19 @@ impl Pallet { } // Initialize authorities on genesis phase. - // TODO-SASS-P2: temporary fix to make the compiler happy - #[allow(dead_code)] fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { - assert!(!authorities.is_empty()); - assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // If this function has already been called with some authorities, then the new list + // should be match the previously set one. 
+ let prev_authorities = Authorities::::get(); + if !prev_authorities.is_empty() { + if prev_authorities.to_vec() == authorities { + return + } else { + panic!("Authorities already were already initialized"); + } + } + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) .expect("Initial number of authorities should be lower than T::MaxAuthorities"); @@ -784,11 +792,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { + if >::should_end_session(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities); + >::enact_session_change(authorities, next_authorities); } } } diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs new file mode 100644 index 0000000000000..15cdab95d8887 --- /dev/null +++ b/frame/sassafras/src/session.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras implementation of traits required by session pallet. 
+ +use super::*; +use frame_support::traits::{EstimateNextSessionRotation, OneSessionHandler}; +use pallet_session::ShouldEndSession; +use sp_runtime::{traits::SaturatedConversion, Permill}; + +impl ShouldEndSession for Pallet { + fn should_end_session(now: T::BlockNumber) -> bool { + // It might be (and it is in current implementation) that session module is calling + // `should_end_session` from it's own `on_initialize` handler, in which case it's + // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we + // have initialized the pallet and updated the current slot. + Self::initialize(now); + Self::should_end_session(now) + } +} + +impl OneSessionHandler for Pallet { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_genesis_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. 
\ + A runtime configuration adjustment may be needed.", + ), + ); + + Self::enact_session_change(bounded_authorities, next_bounded_authorities) + } + + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) + } +} + +impl EstimateNextSessionRotation for Pallet { + fn average_session_length() -> T::BlockNumber { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; + let progress = Permill::from_rational(*elapsed, T::EpochDuration::get()); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (Some(progress), T::DbWeight::get().reads(3)) + } + + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; This implies an internal error and should + /// not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // + // ## IMPORTANT NOTE + // + // This implementation is linked to how [`should_session_change`] is working. This might need + // to be updated accordingly, if the underlying mechanics of slot and epochs change. + fn estimate_next_session_rotation(now: T::BlockNumber) -> (Option, Weight) { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + let upper_bound = next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. 
+ let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (upper_bound, T::DbWeight::get().reads(3)) + } +} From 61619eb8640d92eeebd556fd65e246271bda7c3c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 18:59:47 +0200 Subject: [PATCH 14/14] Fix frame tests --- frame/sassafras/src/mock.rs | 25 +++++++++---------------- frame/sassafras/src/tests.rs | 18 +++--------------- 2 files changed, 12 insertions(+), 31 deletions(-) diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 120120b84e7eb..25ef4f61fb881 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -17,32 +17,25 @@ //! Test utilities for Sassafras pallet. -// TODO-SASS-P2 remove -#![allow(unused_imports)] - -use crate::{self as pallet_sassafras, Authorities, Config, SameAuthoritiesForever}; +use crate::{self as pallet_sassafras, SameAuthoritiesForever}; use frame_support::{ parameter_types, - traits::{ - ConstU128, ConstU32, ConstU64, GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize, - }, + traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}, }; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityId, AuthorityIndex, AuthorityPair, Slot, + digests::PreDigest, + vrf::{self, VRFOutput, VRFProof}, + AuthorityIndex, AuthorityPair, Slot, }; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::{ - crypto::{IsWrappedBy, KeyTypeId, Pair}, + crypto::{IsWrappedBy, Pair}, H256, U256, }; use sp_runtime::{ - curve::PiecewiseLinear, - impl_opaque_keys, testing::{Digest, DigestItem, Header, TestXt}, - traits::{Header as _, IdentityLookup, OpaqueKeys}, - Perbill, + traits::IdentityLookup, }; const EPOCH_DURATION: u64 = 10; @@ -156,7 +149,7 @@ fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput randomness = 
crate::NextRandomness::::get(); } - let transcript = sp_consensus_sassafras::make_ticket_transcript(&randomness, attempt, epoch); + let transcript = vrf::make_ticket_transcript(&randomness, attempt, epoch); let inout = pair.vrf_sign(transcript); let output = VRFOutput(inout.0.to_output()); let proof = VRFProof(inout.1); @@ -184,7 +177,7 @@ fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { randomness = crate::NextRandomness::::get(); } - let transcript = sp_consensus_sassafras::make_slot_transcript(&randomness, slot, epoch); + let transcript = vrf::make_slot_transcript(&randomness, slot, epoch); let inout = pair.vrf_sign(transcript); let output = VRFOutput(inout.0.to_output()); let proof = VRFProof(inout.1); diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 1648a8ff4f230..3eadff59cdd6f 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -17,26 +17,13 @@ //! Tests for Sassafras pallet. -// TODO-SASS-P2 remove -#![allow(unused_imports)] - use crate::*; use mock::*; -use frame_support::{ - assert_err, assert_noop, assert_ok, - dispatch::EncodeLike, - traits::{ConstU32, Currency, EstimateNextSessionRotation, OnFinalize, OnInitialize}, - weights::{GetDispatchInfo, Pays}, - BoundedBTreeSet, -}; +use frame_support::traits::{OnFinalize, OnInitialize}; use hex_literal::hex; -use pallet_session::ShouldEndSession; -use sp_consensus_sassafras::{SassafrasEpochConfiguration, Slot}; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::crypto::Pair; +use sp_consensus_sassafras::Slot; use sp_runtime::traits::Get; -use std::collections::BTreeSet; #[test] fn slot_ticket_fetch() { @@ -285,6 +272,7 @@ fn epoch_change_block() { sp_consensus_sassafras::digests::NextEpochDescriptor { authorities: NextAuthorities::::get().to_vec(), randomness: NextRandomness::::get(), + config: None, }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode());