From 387c6ed30c5fb95e32b2bfefc07cbe3e4d6be7ef Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 21 Jul 2022 10:38:02 +0200 Subject: [PATCH 01/62] Sassafras consensus - Prototype 1 (#11678) * Node binary derived from the `node-template`. * No fancy pallets only stuff strictly necessary to have something "that works". * Implementation of the very basic components of the Sassafras protocol (frame, primitives and client). * Static validators set. * Block randomness accumulator for next epoch randomness seed. * Tickets generation initially performed via Schnorrkel VRF (instead of Ring VRF). * Tickets on-chain publication directly by the author (no proxy) via unsigned extrinsics * Tickets outside-in sort strategy . * Aura-like fallback for empty slots. This address the genesis warm-up phase as well. * Block verification for primary and secondary method --- Cargo.lock | 153 ++ Cargo.toml | 5 + bin/node-sassafras/.editorconfig | 16 + bin/node-sassafras/node/Cargo.toml | 72 + bin/node-sassafras/node/build.rs | 7 + bin/node-sassafras/node/src/chain_spec.rs | 158 ++ bin/node-sassafras/node/src/cli.rs | 49 + bin/node-sassafras/node/src/command.rs | 133 ++ bin/node-sassafras/node/src/main.rs | 13 + bin/node-sassafras/node/src/rpc.rs | 57 + bin/node-sassafras/node/src/service.rs | 353 ++++ bin/node-sassafras/runtime/Cargo.toml | 103 ++ bin/node-sassafras/runtime/build.rs | 9 + bin/node-sassafras/runtime/src/lib.rs | 562 +++++++ client/consensus/sassafras/Cargo.toml | 43 + client/consensus/sassafras/src/authorship.rs | 185 ++ client/consensus/sassafras/src/aux_schema.rs | 101 ++ client/consensus/sassafras/src/lib.rs | 1481 +++++++++++++++++ .../consensus/sassafras/src/verification.rs | 147 ++ client/consensus/slots/src/lib.rs | 2 +- frame/sassafras/Cargo.toml | 54 + frame/sassafras/README.md | 16 + frame/sassafras/src/lib.rs | 710 ++++++++ primitives/consensus/sassafras/Cargo.toml | 51 + primitives/consensus/sassafras/src/digests.rs | 101 ++ 
.../consensus/sassafras/src/inherents.rs | 102 ++ primitives/consensus/sassafras/src/lib.rs | 192 +++ primitives/consensus/vrf/src/schnorrkel.rs | 4 +- primitives/core/src/crypto.rs | 2 + primitives/keystore/src/vrf.rs | 2 + 30 files changed, 4880 insertions(+), 3 deletions(-) create mode 100644 bin/node-sassafras/.editorconfig create mode 100644 bin/node-sassafras/node/Cargo.toml create mode 100644 bin/node-sassafras/node/build.rs create mode 100644 bin/node-sassafras/node/src/chain_spec.rs create mode 100644 bin/node-sassafras/node/src/cli.rs create mode 100644 bin/node-sassafras/node/src/command.rs create mode 100644 bin/node-sassafras/node/src/main.rs create mode 100644 bin/node-sassafras/node/src/rpc.rs create mode 100644 bin/node-sassafras/node/src/service.rs create mode 100644 bin/node-sassafras/runtime/Cargo.toml create mode 100644 bin/node-sassafras/runtime/build.rs create mode 100644 bin/node-sassafras/runtime/src/lib.rs create mode 100644 client/consensus/sassafras/Cargo.toml create mode 100644 client/consensus/sassafras/src/authorship.rs create mode 100644 client/consensus/sassafras/src/aux_schema.rs create mode 100644 client/consensus/sassafras/src/lib.rs create mode 100644 client/consensus/sassafras/src/verification.rs create mode 100644 frame/sassafras/Cargo.toml create mode 100644 frame/sassafras/README.md create mode 100644 frame/sassafras/src/lib.rs create mode 100644 primitives/consensus/sassafras/Cargo.toml create mode 100644 primitives/consensus/sassafras/src/digests.rs create mode 100644 primitives/consensus/sassafras/src/inherents.rs create mode 100644 primitives/consensus/sassafras/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 279864e056b0b..f1819c85d7bef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4879,6 +4879,84 @@ dependencies = [ "node-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.1.0" +dependencies = [ + "clap 3.1.18", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + 
"node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-sassafras", + "sc-executor", + "sc-finality-grandpa", + "sc-keystore", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "pallet-balances", + "pallet-grandpa", + "pallet-randomness-collective-flip", + "pallet-sassafras", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -6168,6 +6246,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", + "sp-consensus-vrf", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -8276,6 +8375,39 @@ dependencies = [ 
"thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.1.0" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.0", + "retain_mut", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-telemetry", + "schnorrkel", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "substrate-prometheus-endpoint", + "thiserror", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -9767,6 +9899,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.1.0" +dependencies = [ + "async-trait", + "merlin", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 9909e6f893877..60de2dc81a669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,8 @@ members = [ "bin/node/rpc", "bin/node/runtime", "bin/node/testing", + "bin/node-sassafras/node", + "bin/node-sassafras/runtime", "bin/utils/chain-spec-builder", "bin/utils/subkey", "client/api", @@ -30,6 +32,7 @@ members = [ "client/consensus/epochs", "client/consensus/manual-seal", "client/consensus/pow", + "client/consensus/sassafras", "client/consensus/slots", "client/consensus/uncles", "client/db", @@ -121,6 +124,7 @@ members = [ "frame/recovery", "frame/referenda", "frame/remark", + "frame/sassafras", "frame/scheduler", "frame/scored-pool", "frame/session", @@ -168,6 +172,7 @@ members = [ "primitives/consensus/babe", "primitives/consensus/common", "primitives/consensus/pow", + 
"primitives/consensus/sassafras", "primitives/consensus/vrf", "primitives/core", "primitives/core/hashing", diff --git a/bin/node-sassafras/.editorconfig b/bin/node-sassafras/.editorconfig new file mode 100644 index 0000000000000..5adac74ca24b3 --- /dev/null +++ b/bin/node-sassafras/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style=space +indent_size=2 +tab_width=2 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +insert_final_newline = true + +[*.{rs,toml}] +indent_style=tab +indent_size=tab +tab_width=4 +max_line_length=100 diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml new file mode 100644 index 0000000000000..c99e7bf5ef0ed --- /dev/null +++ b/bin/node-sassafras/node/Cargo.toml @@ -0,0 +1,72 @@ +[package] +name = "node-sassafras" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Node testbed for Sassafras consensus." +homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false +build = "build.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[bin]] +name = "node-sassafras" + +[dependencies] +clap = { version = "3.1.18", features = ["derive"] } + +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.10.0-dev", path = "../../../client/service", features = ["wasmtime"] } +sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } +sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-consensus-sassafras = { version = "0.1.0", path = 
"../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.1.0", path = "../../../primitives/consensus/sassafras" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } + +# These dependencies are used for the node template's RPCs +jsonrpsee = { version = "0.14.0", features = ["server"] } +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking = { version = 
"4.0.0-dev", path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } + +# Local Dependencies +node-sassafras-runtime = { version = "0.1.0", path = "../runtime" } + +# CLI-specific dependencies +try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } + +[build-dependencies] +substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } + +[features] +default = [] +runtime-benchmarks = ["node-sassafras-runtime/runtime-benchmarks"] +# Enable features that allow the runtime to be tried and debugged. Name might be subject to change +# in the near future. +try-runtime = ["node-sassafras-runtime/try-runtime", "try-runtime-cli"] diff --git a/bin/node-sassafras/node/build.rs b/bin/node-sassafras/node/build.rs new file mode 100644 index 0000000000000..e3bfe3116bf28 --- /dev/null +++ b/bin/node-sassafras/node/build.rs @@ -0,0 +1,7 @@ +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + + rerun_if_git_head_changed(); +} diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs new file mode 100644 index 0000000000000..ed189a6964976 --- /dev/null +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -0,0 +1,158 @@ +use node_sassafras_runtime::{ + AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, Signature, + SudoConfig, SystemConfig, WASM_BINARY, +}; +use sc_service::ChainType; +use sp_consensus_sassafras::AuthorityId as SassafrasId; +use sp_core::{sr25519, Pair, Public}; +use sp_finality_grandpa::AuthorityId as GrandpaId; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +// The URL for the telemetry server. +// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; + +/// Specialized `ChainSpec`. 
This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec; + +/// Generate a crypto pair from seed. +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = ::Signer; + +/// Generate an account ID from seed. +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +/// Generate authority keys from seed. +pub fn authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { + (get_from_seed::(s), get_from_seed::(s)) +} + +pub fn development_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; + + Ok(ChainSpec::from_genesis( + // Name + "Development", + // ID + "dev", + ChainType::Development, + move || { + testnet_genesis( + wasm_binary, + // Initial PoA authorities + vec![authority_keys_from_seed("Alice")], + // Sudo account + get_account_id_from_seed::("Alice"), + // Pre-funded accounts + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ) + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + None, + None, + // Properties + None, + // Extensions + None, + )) +} + +pub fn local_testnet_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; + + Ok(ChainSpec::from_genesis( + // Name + "Local Testnet", + // ID + "local_testnet", + ChainType::Local, + move || { + testnet_genesis( + wasm_binary, + // Initial PoA authorities + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + // Sudo account + get_account_id_from_seed::("Alice"), + // Pre-funded accounts + vec![ + 
get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + true, + ) + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + None, + // Properties + None, + None, + // Extensions + None, + )) +} + +/// Configure initial storage state for FRAME modules. +fn testnet_genesis( + wasm_binary: &[u8], + initial_authorities: Vec<(SassafrasId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, + _enable_println: bool, +) -> GenesisConfig { + GenesisConfig { + system: SystemConfig { + // Add Wasm runtime to storage. + code: wasm_binary.to_vec(), + }, + balances: BalancesConfig { + // Configure endowed accounts with initial balance of 1 << 60. + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + }, + + sassafras: SassafrasConfig { + authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), + epoch_config: Some(node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG), + }, + grandpa: GrandpaConfig { + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + }, + sudo: SudoConfig { + // Assign network admin rights. 
+ key: Some(root_key), + }, + transaction_payment: Default::default(), + } +} diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs new file mode 100644 index 0000000000000..bb2ffa1938107 --- /dev/null +++ b/bin/node-sassafras/node/src/cli.rs @@ -0,0 +1,49 @@ +use sc_cli::RunCmd; + +#[derive(Debug, clap::Parser)] +pub struct Cli { + #[clap(subcommand)] + pub subcommand: Option, + + #[clap(flatten)] + pub run: RunCmd, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + /// Key management cli utilities + #[clap(subcommand)] + Key(sc_cli::KeySubcommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Try some command against runtime state. + #[cfg(feature = "try-runtime")] + TryRuntime(try_runtime_cli::TryRuntimeCmd), + + /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. + #[cfg(not(feature = "try-runtime"))] + TryRuntime, + + /// Db meta columns information. 
+ ChainInfo(sc_cli::ChainInfoCmd), +} diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs new file mode 100644 index 0000000000000..cf17c37968f54 --- /dev/null +++ b/bin/node-sassafras/node/src/command.rs @@ -0,0 +1,133 @@ +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; +use node_sassafras_runtime::Block; +use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; +use sc_service::PartialComponents; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Substrate Node".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "support.anonymous.an".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()?), + "" | "local" => Box::new(chain_spec::local_testnet_config()?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) + } + + fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { + &node_sassafras_runtime::VERSION + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + + match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. 
} = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; + let aux_revert = Box::new(|client, _, blocks| { + sc_finality_grandpa::revert(client, blocks)?; + Ok(()) + }); + Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) + }) + }, + #[cfg(feature = "try-runtime")] + Some(Subcommand::TryRuntime(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + // we don't need any of the components of new_partial, just a runtime, or a task + // manager to do `async_run`. 
+ let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + let task_manager = + sc_service::TaskManager::new(config.tokio_handle.clone(), registry) + .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; + Ok((cmd.run::(config), task_manager)) + }) + }, + #[cfg(not(feature = "try-runtime"))] + Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ + You can enable it with `--features try-runtime`." + .into()), + Some(Subcommand::ChainInfo(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(&config)) + }, + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + service::new_full(config).map_err(sc_cli::Error::Service) + }) + }, + } +} diff --git a/bin/node-sassafras/node/src/main.rs b/bin/node-sassafras/node/src/main.rs new file mode 100644 index 0000000000000..4449d28b9fa41 --- /dev/null +++ b/bin/node-sassafras/node/src/main.rs @@ -0,0 +1,13 @@ +//! Substrate Node Template CLI library. +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; +mod rpc; + +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/bin/node-sassafras/node/src/rpc.rs b/bin/node-sassafras/node/src/rpc.rs new file mode 100644 index 0000000000000..4964c5c15fc06 --- /dev/null +++ b/bin/node-sassafras/node/src/rpc.rs @@ -0,0 +1,57 @@ +//! A collection of node-specific RPC methods. +//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer +//! used by Substrate nodes. This file extends those RPC definitions with +//! capabilities that are specific to this project's runtime configuration. 
+ +#![warn(missing_docs)] + +use std::sync::Arc; + +use jsonrpsee::RpcModule; +use node_sassafras_runtime::{opaque::Block, AccountId, Balance, Index}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; + +pub use sc_rpc_api::DenyUnsafe; + +/// Full client dependencies. +pub struct FullDeps { + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, +} + +/// Instantiate all full RPC extensions. +pub fn create_full( + deps: FullDeps, +) -> Result, Box> +where + C: ProvideRuntimeApi, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: BlockBuilder, + P: TransactionPool + 'static, +{ + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; + + let mut module = RpcModule::new(()); + let FullDeps { client, pool, deny_unsafe } = deps; + + module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client).into_rpc())?; + + // Extend this RPC with a custom API by using the following syntax. + // `YourRpcStruct` should have a reference to a client, which is needed + // to call into the runtime. + // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` + + Ok(module) +} diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs new file mode 100644 index 0000000000000..ec8f10c1a59b1 --- /dev/null +++ b/bin/node-sassafras/node/src/service.rs @@ -0,0 +1,353 @@ +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{BlockBackend, ExecutorProvider}; +pub use sc_executor::NativeElseWasmExecutor; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use std::{sync::Arc, time::Duration}; + +// Our native executor instance. 
+pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + /// Only enable the benchmarking host functions when we actually want to benchmark. + #[cfg(feature = "runtime-benchmarks")] + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + /// Otherwise we only use the default Substrate host functions. + #[cfg(not(feature = "runtime-benchmarks"))] + type ExtendHostFunctions = (); + + fn dispatch(method: &str, data: &[u8]) -> Option> { + node_sassafras_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + node_sassafras_runtime::native_version() + } +} + +pub(crate) type FullClient = + sc_service::TFullClient>; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; + +type FullGrandpaBlockImport = + sc_finality_grandpa::GrandpaBlockImport; + +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + ( + Option, + ( + sc_consensus_sassafras::SassafrasBlockImport< + Block, + FullClient, + FullGrandpaBlockImport, + >, + sc_finality_grandpa::LinkHalf, + sc_consensus_sassafras::SassafrasLink, + ), + ), + >, + ServiceError, +> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other("Remote Keystores are not supported.".into())) + } + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + config.runtime_cache_size, + ); + + let (client, backend, keystore_container, task_manager) = + 
sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let justification_import = grandpa_block_import.clone(); + + let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( + sc_consensus_sassafras::Config::get(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let slot_duration = sassafras_link.config().slot_duration(); + + let import_queue = sc_consensus_sassafras::import_queue( + sassafras_link.clone(), + sassafras_block_import.clone(), + Some(Box::new(justification_import)), + client.clone(), + select_chain.clone(), + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let import_setup = (sassafras_block_import, grandpa_link, sassafras_link); + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + 
select_chain, + transaction_pool, + other: (telemetry, import_setup), + }) +} + +fn remote_keystore(_url: &String) -> Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + +/// Builds a new service for a full client. +pub fn new_full(mut config: Configuration) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + mut keystore_container, + select_chain, + transaction_pool, + other: (mut telemetry, import_setup), + } = new_partial(&config)?; + + let (block_import, grandpa_link, sassafras_link) = import_setup; + + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => + return Err(ServiceError::Other(format!( + "Error hooking up remote keystore for {}: {}", + url, e + ))), + }; + } + let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name( + &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), + &config.chain_spec, + ); + + config + .network + .extra_sets + .push(sc_finality_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone())); + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + Vec::default(), + )); + + let (network, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync: Some(warp_sync), + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + } + + 
let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + config, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let slot_duration = sassafras_link.config().slot_duration(); + + let sassafras_config = sc_consensus_sassafras::SassafrasParams { + client: client.clone(), + keystore: keystore_container.sync_keystore(), + select_chain, + env: proposer, + block_import, + sassafras_link, + sync_oracle: network.clone(), + justification_sync_link: network.clone(), + force_authoring, + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + Ok((timestamp, slot)) + }, + can_author_with, + }; + + let sassafras = 
sc_consensus_sassafras::start_sassafras(sassafras_config)?; + + // the Sassafras authoring task is considered essential, i.e. if it + // fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "sassafras", + Some("block-authoring"), + sassafras, + ); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = sc_finality_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. 
+ task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + None, + sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, + ); + } + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml new file mode 100644 index 0000000000000..233d9e0e14bbb --- /dev/null +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -0,0 +1,103 @@ +[package] +name = "node-sassafras-runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Runtime testbed for Sassafras consensus." +homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../../frame/sassafras" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = 
"4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} +sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-core = { version = "6.0.0", default-features = false, path = "../../../primitives/core" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} +sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } + +# Used for the node template's RPCs +frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } + +# Used for runtime benchmarking +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } 
+hex-literal = { version = "0.3.4", optional = true } + +[build-dependencies] +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "frame-executive/std", + "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "pallet-sassafras/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-randomness-collective-flip/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-sassafras/std", + "sp-core/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-transaction-pool/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking", + "frame-system/runtime-benchmarks", + "hex-literal", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-executive/try-runtime", + "frame-try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-randomness-collective-flip/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", +] diff --git a/bin/node-sassafras/runtime/build.rs b/bin/node-sassafras/runtime/build.rs new file mode 100644 index 0000000000000..9b53d2457dffd --- /dev/null +++ b/bin/node-sassafras/runtime/build.rs @@ -0,0 +1,9 @@ +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs new file 
mode 100644 index 0000000000000..c458605375ab1 --- /dev/null +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -0,0 +1,562 @@ +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +// A few exports that help ease life for downstream crates. +pub use frame_support::{ + construct_runtime, parameter_types, + traits::{ + ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, + }, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + IdentityFee, Weight, + }, + StorageValue, +}; +pub use frame_system::Call as SystemCall; +pub use pallet_balances::Call as BalancesCall; +pub use pallet_timestamp::Call as TimestampCall; +use pallet_transaction_payment::CurrencyAdapter; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +/// An index to a block. +pub type BlockNumber = u32; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// Some way of identifying an account on the chain. 
We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// Balance of an account. +pub type Balance = u128; + +/// Index of a transaction in the chain. +pub type Index = u32; + +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; + +/// Type used for expressing timestamp. +pub type Moment = u64; + +/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know +/// the specifics of the runtime. They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core data structures. +pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; + + impl_opaque_keys! { + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, + } + } +} + +// To learn more about runtime versioning and what each of the following value means: +// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("node-sassafras"), + impl_name: create_runtime_str!("node-sassafras"), + authoring_version: 1, + // The version of the runtime specification. A full node will not attempt to use its native + // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + // `spec_version`, and `authoring_version` are the same between Wasm and native. + // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use + // the compatible custom types. 
+ spec_version: 100, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// This determines the average expected block time that we are targeting. +/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_sassafras` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. +pub const MILLISECS_PER_BLOCK: u64 = 6000; + +// NOTE: Currently it is not possible to change the slot duration after the chain has started. +// Attempting to do so will brick block production. +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +// TODO-SASS-P4: this is an intentional small value used for testing +pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10; + +pub const EPOCH_DURATION_IN_SLOTS: u64 = { + const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; + + (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 +}; + +// Time is measured by number of blocks. +pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); +pub const HOURS: BlockNumber = MINUTES * 60; +pub const DAYS: BlockNumber = HOURS * 24; + +pub const MAX_AUTHORITIES: u32 = 32; + +/// The Sassafras epoch configuration at genesis. +pub const SASSAFRAS_GENESIS_EPOCH_CONFIG: sp_consensus_sassafras::SassafrasEpochConfiguration = + sp_consensus_sassafras::SassafrasEpochConfiguration { + // TODO-SASS-P2 + }; + +/// The version information used to identify this runtime when compiled natively. 
+#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +impl frame_system::offchain::SendTransactionTypes for Runtime +where + Call: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = Call; +} + +parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; + pub const Version: RuntimeVersion = VERSION; + /// We allow for 2 seconds of compute with a 6 second average block time. + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub const SS58Prefix: u8 = 42; +} + +// Configure FRAME pallets to include in runtime. + +impl frame_system::Config for Runtime { + /// The basic call filter to use in dispatchable. + type BaseCallFilter = frame_support::traits::Everything; + /// Block & extrinsics weights: base values and limits. + type BlockWeights = BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = BlockLength; + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = AccountIdLookup; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The header type. + type Header = generic::Header; + /// The ubiquitous event type. 
+ type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// The weight of database operations that the runtime can invoke. + type DbWeight = RocksDbWeight; + /// Version of the runtime. + type Version = Version; + /// Converts a module to the index of the module in `construct_runtime!`. + /// + /// This type is being generated by `construct_runtime!`. + type PalletInfo = PalletInfo; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; + /// The set code logic, just the default since we're not a parachain. + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_randomness_collective_flip::Config for Runtime {} + +parameter_types! { + pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; + pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; +} + +impl pallet_sassafras::Config for Runtime { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; + type MaxAuthorities = ConstU32; + type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; + // TODO-SASS-P4. Add some redundancy before starting tickets drop. 
+ type MaxSubmittedTickets = ConstU32<{ 3 * EPOCH_DURATION_IN_SLOTS as u32 }>; +} + +impl pallet_grandpa::Config for Runtime { + type Event = Event; + type Call = Call; + type KeyOwnerProofSystem = (); + type KeyOwnerProof = + >::Proof; + type KeyOwnerIdentification = >::IdentificationTuple; + type HandleEquivocation = (); + type WeightInfo = (); + type MaxAuthorities = ConstU32; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = (); +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ConstU128<500>; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; +} + +impl pallet_transaction_payment::Config for Runtime { + type Event = Event; + type OnChargeTransaction = CurrencyAdapter; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type Event = Event; + type Call = Call; +} + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + RandomnessCollectiveFlip: pallet_randomness_collective_flip, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + } +); + +/// The address format for describing accounts. 
+pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [frame_benchmarking, BaselineBench::] + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_timestamp, Timestamp] + ); +} + +impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn configuration() -> sp_consensus_sassafras::SassafrasGenesisConfiguration { + sp_consensus_sassafras::SassafrasGenesisConfiguration { + slot_duration: Sassafras::slot_duration(), + epoch_length: EpochDuration::get(), + genesis_authorities: Sassafras::authorities().to_vec(), + randomness: Sassafras::randomness(), + } + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + Sassafras::submit_tickets_unsigned_extrinsic(tickets) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { + Sassafras::slot_ticket(slot) + } + } + + impl sp_session::SessionKeys for Runtime { + fn 
generate_session_keys(seed: Option>) -> Vec { + opaque::SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + NumberFor, + >, + _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + _authority_id: GrandpaId, + ) -> Option { + // NOTE: this is the only implementation possible since we've + // defined our key owner proof type as a bottom type (i.e. a type + // with no values). + None + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info 
= AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, TrackedStorageKey}; + + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + impl frame_system_benchmarking::Config for Runtime {} + impl baseline::Config for Runtime {} + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade() -> (Weight, Weight) { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. If any of the pre/post migration checks fail, we shall stop + // right here and right now. 
+ let weight = Executive::try_runtime_upgrade().unwrap(); + (weight, BlockWeights::get().max_block) + } + + fn execute_block_no_check(block: Block) -> Weight { + Executive::execute_block_no_check(block) + } + } +} diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..51400bd66721d --- /dev/null +++ b/client/consensus/sassafras/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "sc-consensus-sassafras" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Sassafras consensus algorithm for substrate" +edition = "2021" +license = "Apache 2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-consensus-sassafras" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +async-trait = "0.1.50" +scale-codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +log = "0.4.16" +parking_lot = "0.12.0" +retain_mut = "0.1.4" +thiserror = "1.0" +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +schnorrkel = "0.9.1" +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } +sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } +sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-application-crypto = { version = "6.0.0", path = "../../../primitives/application-crypto" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "4.0.0-dev", path = 
"../../../primitives/blockchain" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-consensus-sassafras = { version = "0.1.0", path = "../../../primitives/consensus/sassafras" } +sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } +sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } +sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs new file mode 100644 index 0000000000000..c8f39497ffa5e --- /dev/null +++ b/client/consensus/sassafras/src/authorship.rs @@ -0,0 +1,185 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Sassafras authority selection and slot claiming. 
+ +use crate::Epoch; + +use scale_codec::Encode; +use sp_application_crypto::AppKey; +use sp_consensus_sassafras::{ + digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, + Ticket, TicketInfo, SASSAFRAS_TICKET_VRF_PREFIX, +}; +use sp_consensus_vrf::schnorrkel::{PublicKey, VRFInOut, VRFOutput, VRFProof}; +use sp_core::{twox_64, ByteArray}; +use sp_keystore::{vrf::make_transcript, SyncCryptoStore, SyncCryptoStorePtr}; + +/// Get secondary authority index for the given epoch and slot. +#[inline] +pub fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { + u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) % + epoch.authorities.len() as u64 +} + +/// Try to claim an epoch slot. +/// If ticket is `None`, then the slot should be claimed using the fallback mechanism. +pub fn claim_slot( + slot: Slot, + epoch: &Epoch, + ticket: Option, + keystore: &SyncCryptoStorePtr, +) -> Option<(PreDigest, AuthorityId)> { + let (authority_index, ticket_info) = match ticket { + Some(ticket) => { + log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); + let ticket_info = epoch.tickets_info.get(&ticket)?.clone(); + log::debug!(target: "sassafras", "🌳 Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", + &ticket.as_bytes()[0..8], ticket_info.authority_index, ticket_info.attempt); + let idx = ticket_info.authority_index as u64; + (idx, Some(ticket_info)) + }, + None => { + log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); + (secondary_authority_index(slot, epoch), None) + }, + }; + + let authority_id = epoch.authorities.get(authority_index as usize).map(|auth| &auth.0)?; + + let transcript_data = make_slot_transcript_data(&epoch.randomness, slot, epoch.epoch_index); + let result = SyncCryptoStore::sr25519_vrf_sign( + &**keystore, + AuthorityId::ID, + authority_id.as_ref(), + transcript_data, + ); + + match result { + Ok(Some(signature)) => { + let pre_digest = PreDigest { + authority_index: authority_index as u32, + 
slot, + block_vrf_output: VRFOutput(signature.output), + block_vrf_proof: VRFProof(signature.proof.clone()), + ticket_info, + }; + Some((pre_digest, authority_id.clone())) + }, + _ => None, + } +} + +/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: +/// - x: redundancy factor; +/// - s: number of slots in epoch; +/// - a: max number of attempts; +/// - v: number of validator in epoch. +/// The parameters should be chosen such that T <= 1. +/// If `attempts * validators` is zero then we fallback to T = 0 +// TODO-SASS-P3: this formula must be double-checked... +#[inline] +fn calculate_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> u128 { + let den = attempts as u128 * validators as u128; + let num = redundancy as u128 * slots as u128; + let res = u128::MAX.checked_div(den).unwrap_or(0).saturating_mul(num); + + // TODO-SASS-P4 remove me + log::debug!( + target: "sassafras", + "🌳 Tickets threshold: {} {:016x}", num as f64 / den as f64, res, + ); + res +} + +/// Returns true if the given VRF output is lower than the given threshold, false otherwise. +#[inline] +pub fn check_threshold(inout: &VRFInOut, threshold: u128) -> bool { + u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(SASSAFRAS_TICKET_VRF_PREFIX)) < threshold +} + +/// Generate the tickets for the given epoch. +/// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` +/// structure. The additional information will be used during epoch to claim slots. 
+pub fn generate_epoch_tickets( + epoch: &mut Epoch, + max_attempts: u32, + redundancy_factor: u32, + keystore: &SyncCryptoStorePtr, +) -> Vec { + let mut tickets = vec![]; + + let threshold = calculate_threshold( + redundancy_factor, + epoch.duration as u32, + max_attempts, + epoch.authorities.len() as u32, + ); + + let authorities = epoch.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); + for (authority_index, authority_id) in authorities { + let raw_key = authority_id.to_raw_vec(); + + if !SyncCryptoStore::has_keys(&**keystore, &[(raw_key.clone(), AuthorityId::ID)]) { + continue + } + + let public = match PublicKey::from_bytes(&raw_key) { + Ok(public) => public, + Err(_) => continue, + }; + + let get_ticket = |attempt| { + let transcript_data = + make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); + + // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, + // But we need to modify the CryptoStore interface first. 
+ let signature = SyncCryptoStore::sr25519_vrf_sign( + &**keystore, + AuthorityId::ID, + authority_id.as_ref(), + transcript_data.clone(), + ) + .ok()??; + + let transcript = make_transcript(transcript_data); + let inout = signature.output.attach_input_hash(&public, transcript).ok()?; + if !check_threshold(&inout, threshold) { + return None + } + + let ticket = VRFOutput(signature.output); + let ticket_info = TicketInfo { + attempt: attempt as u32, + authority_index: authority_index as u32, + proof: VRFProof(signature.proof), + }; + + Some((ticket, ticket_info)) + }; + + for attempt in 0..max_attempts { + if let Some((ticket, ticket_info)) = get_ticket(attempt) { + tickets.push(ticket); + epoch.tickets_info.insert(ticket, ticket_info); + } + } + } + tickets +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs new file mode 100644 index 0000000000000..59f53415a31d2 --- /dev/null +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Schema for Sassafras epoch changes in the auxiliary db. 
+ +use scale_codec::{Decode, Encode}; + +use sc_client_api::backend::AuxStore; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_consensus_sassafras::SassafrasBlockWeight; +use sp_runtime::traits::Block as BlockT; + +use crate::Epoch; + +const SASSAFRAS_EPOCH_CHANGES_KEY: &[u8] = b"sassafras_epoch_changes"; + +/// The aux storage key used to store the block weight of the given block hash. +fn block_weight_key(block_hash: H) -> Vec { + (b"sassafras_block_weight", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + match backend.get_aux(key)? { + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(|e| { + ClientError::Backend(format!("Sassafras db is corrupted, Decode error: {}", e)) + }), + None => Ok(None), + } +} + +/// Update the epoch changes on disk after a change. +pub fn write_epoch_changes( + epoch_changes: &EpochChangesFor, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + epoch_changes.using_encoded(|s| write_aux(&[(SASSAFRAS_EPOCH_CHANGES_KEY, s)])) +} + +/// Load or initialize persistent epoch change data from backend. +pub fn load_epoch_changes( + backend: &AS, +) -> ClientResult> { + let maybe_epoch_changes = + load_decode::<_, EpochChangesFor>(backend, SASSAFRAS_EPOCH_CHANGES_KEY)?; + + let epoch_changes = SharedEpochChanges::::new( + maybe_epoch_changes.unwrap_or_else(|| EpochChangesFor::::default()), + ); + + // Rebalance the tree after deserialization. this isn't strictly necessary + // since the tree is now rebalanced on every update operation. but since the + // tree wasn't rebalanced initially it's useful to temporarily leave it here + // to avoid having to wait until an import for rebalancing. + epoch_changes.shared_data().rebalance(); + + Ok(epoch_changes) +} + +/// Write the cumulative chain-weight of a block ot aux storage. 
+pub fn write_block_weight( + block_hash: H, + block_weight: SassafrasBlockWeight, + write_aux: F, +) -> R +where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| write_aux(&[(key, s)])) +} + +/// Load the cumulative chain-weight associated with a block. +pub fn load_block_weight( + backend: &B, + block_hash: H, +) -> ClientResult> { + load_decode(backend, block_weight_key(block_hash).as_slice()) +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..c83b84cb0ff37 --- /dev/null +++ b/client/consensus/sassafras/src/lib.rs @@ -0,0 +1,1481 @@ +// This file is part of Substrate. + +// This file is part of SubstrateNonepyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! # Sassafras +//! +//! 
TODO-SASS-P2: documentation + +#![deny(warnings)] +#![forbid(unsafe_code, missing_docs)] + +use std::{ + borrow::Cow, + collections::{BTreeMap, HashMap}, + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::mpsc::{channel, Receiver, Sender}, + prelude::*, +}; +use log::{debug, error, info, log, trace, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry; +use retain_mut::RetainMut; +use scale_codec::{Decode, Encode}; +use schnorrkel::SignatureError; + +use sc_client_api::{ + backend::AuxStore, BlockchainEvents, PreCommitActions, ProvideUncles, UsageProvider, +}; +use sc_consensus::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + StateAction, + }, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, +}; +use sc_consensus_epochs::{ + descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, + SharedEpochChanges, ViableEpochDescriptor, +}; +use sc_consensus_slots::{ + check_equivocation, CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges, +}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_application_crypto::AppKey; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; +use sp_consensus::{ + BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, + SelectChain, SyncOracle, +}; +use sp_consensus_slots::{Slot, SlotDuration}; +use sp_core::{crypto::ByteArray, ExecutionContext}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, Header, NumberFor, One, Zero}, + 
DigestItem, +}; + +// Re-export Sassafras primitives. +pub use sp_consensus_sassafras::{ + digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, + inherents::SassafrasInherentData, + AuthorityId, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, SassafrasGenesisConfiguration, Ticket, TicketInfo, VRFOutput, + VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, +}; + +mod authorship; +mod aux_schema; +mod verification; + +/// Sassafras epoch information +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// The duration of this epoch in slots. + pub duration: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// Configuration of the epoch. + pub config: SassafrasEpochConfiguration, + /// Tickets metadata. + pub tickets_info: BTreeMap, +} + +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type Slot = Slot; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + // TODO-SASS-P2: allow config change on epoch change + config: self.config.clone(), + tickets_info: BTreeMap::new(), + } + } + + fn start_slot(&self) -> Slot { + self.start_slot + } + + fn end_slot(&self) -> Slot { + self.start_slot + self.duration + } +} + +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. 
+ pub fn genesis(genesis_config: &SassafrasGenesisConfiguration, slot: Slot) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot, + duration: genesis_config.epoch_length, + authorities: genesis_config.genesis_authorities.clone(), + randomness: genesis_config.randomness, + config: SassafrasEpochConfiguration {}, + tickets_info: BTreeMap::new(), + } + } +} + +/// Errors encountered by the Sassafras authorship task. +/// TODO-SASS-P2: remove unused errors. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Multiple Sassafras pre-runtime digests + #[error("Multiple Sassafras pre-runtime digests")] + MultiplePreRuntimeDigests, + /// No Sassafras pre-runtime digest found + #[error("No Sassafras pre-runtime digest found")] + NoPreRuntimeDigest, + /// Multiple Sassafras epoch change digests + #[error("Multiple Sassafras epoch change digests")] + MultipleEpochChangeDigests, + // /// Multiple Sassafras config change digests + // #[error("Multiple Sassafras config change digests, rejecting!")] + // MultipleConfigChangeDigests, + // /// Could not extract timestamp and slot + // #[error("Could not extract timestamp and slot: {0}")] + // Extraction(sp_consensus::Error), + /// Could not fetch epoch + #[error("Could not fetch epoch at {0:?}")] + FetchEpoch(B::Hash), + /// Header rejected: too far in the future + #[error("Header {0:?} rejected: too far in the future")] + TooFarInFuture(B::Hash), + /// Parent unavailable. Cannot import + #[error("Parent ({0}) of {1} unavailable. 
Cannot import")] + ParentUnavailable(B::Hash, B::Hash), + /// Slot number must increase + #[error("Slot number must increase: parent slot: {0}, this slot: {1}")] + SlotMustIncrease(Slot, Slot), + /// Header has a bad seal + #[error("Header {0:?} has a bad seal")] + HeaderBadSeal(B::Hash), + /// Header is unsealed + #[error("Header {0:?} is unsealed")] + HeaderUnsealed(B::Hash), + /// Slot author not found + #[error("Slot author not found")] + SlotAuthorNotFound, + /// Bad signature + #[error("Bad signature on {0:?}")] + BadSignature(B::Hash), + // /// Invalid author: Expected secondary author + // #[error("Invalid author: Expected secondary author: {0:?}, got: {1:?}.")] + // InvalidAuthor(AuthorityId, AuthorityId), + // /// VRF verification of block by author failed + // #[error("VRF verification of block by author {0:?} failed: threshold {1} exceeded")] + // VRFVerificationOfBlockFailed(AuthorityId, u128), + /// VRF verification failed + #[error("VRF verification failed: {0:?}")] + VRFVerificationFailed(SignatureError), + /// Unexpected authoring mechanism + #[error("Unexpected authoring mechanism")] + UnexpectedAuthoringMechanism, + /// Could not fetch parent header + #[error("Could not fetch parent header: {0}")] + FetchParentHeader(sp_blockchain::Error), + /// Expected epoch change to happen. + #[error("Expected epoch change to happen at {0:?}, s{1}")] + ExpectedEpochChange(B::Hash, Slot), + // /// Unexpected config change. 
+ // #[error("Unexpected config change")] + // UnexpectedConfigChange, + /// Unexpected epoch change + #[error("Unexpected epoch change")] + UnexpectedEpochChange, + /// Parent block has no associated weight + #[error("Parent block of {0} has no associated weight")] + ParentBlockNoAssociatedWeight(B::Hash), + /// Check inherents error + #[error("Checking inherents failed: {0}")] + CheckInherents(sp_inherents::Error), + /// Unhandled check inherents error + #[error("Checking inherents unhandled error: {}", String::from_utf8_lossy(.0))] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + /// Create inherents error. + #[error("Creating inherents failed: {0}")] + CreateInherents(sp_inherents::Error), + /// Client error + #[error(transparent)] + Client(sp_blockchain::Error), + /// Runtime Api error. + #[error(transparent)] + RuntimeApi(sp_api::ApiError), + /// Fork tree error + #[error(transparent)] + ForkTree(Box>), +} + +impl From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +fn sassafras_err(error: Error) -> Error { + error!(target: "sassafras", "🌳 {}", error); + error +} + +/// Intermediate value passed to block importer. +pub struct SassafrasIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +} + +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; + +/// Configuration for Sassafras used for defining block verification parameters as +/// well as authoring (e.g. the slot duration). +#[derive(Clone)] +pub struct Config { + genesis_config: SassafrasGenesisConfiguration, +} + +impl Config { + /// Read Sassafras genesis configuration from the runtime. + /// + /// TODO-SASS-P4: (FIXME) + /// This doesn't return the genesis configuration, but the Configuration at best block. + /// There is an open [PR](https://github.com/paritytech/substrate/pull/11760) for BABE, + /// we'll follow the same strategy once it is closed. 
+ pub fn get(client: &C) -> ClientResult + where + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: SassafrasApi, + { + let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); + if client.usage_info().chain.finalized_state.is_none() { + debug!(target: "sassafras", "🌳 No finalized state is available. Reading config from genesis"); + best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); + } + + let genesis_config = client.runtime_api().configuration(&best_block_id)?; + + Ok(Config { genesis_config }) + } + + /// Get the genesis configuration. + pub fn genesis_config(&self) -> &SassafrasGenesisConfiguration { + &self.genesis_config + } + + /// Get the slot duration defined in the genesis configuration. + pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.genesis_config.slot_duration) + } +} + +/// Parameters for Sassafras. +pub struct SassafrasParams { + /// The client to use + pub client: Arc, + /// The keystore that manages the keys of the node. + pub keystore: SyncCryptoStorePtr, + /// The chain selection strategy + pub select_chain: SC, + /// The environment we are producing blocks for. + pub env: EN, + /// The underlying block-import object to supply our produced blocks to. + /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + /// The source of timestamps for relative slots + pub sassafras_link: SassafrasLink, + /// Checks if the current native implementation can author with a runtime at a given block. 
+ pub can_author_with: CAW, +} + +/// Start the Sassafras worker. +pub fn start_sassafras( + SassafrasParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + can_author_with, + }: SassafrasParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + PreCommitActions + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer>, + I: BlockImport> + + Send + + Sync + + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send + Sync + 'static, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let worker = SassafrasSlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, + force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + config: sassafras_link.config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + sassafras_link.config.slot_duration(), + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), + sync_oracle, + create_inherent_data_providers, + can_author_with, + ); + + let ticket_worker = tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + ); + + let inner = future::select(Box::pin(slot_worker), 
Box::pin(ticket_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} + +async fn tickets_worker( + client: Arc, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + select_chain: SC, +) where + B: BlockT, + C: BlockchainEvents + ProvideRuntimeApi, + C::Api: SassafrasApi, + SC: SelectChain + 'static, +{ + let mut notifications = client.import_notification_stream(); + while let Some(notification) = notifications.next().await { + let epoch_desc = match find_next_epoch_digest::(¬ification.header) { + Ok(Some(epoch_desc)) => epoch_desc, + Err(err) => { + warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); + continue + }, + _ => continue, + }; + + debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); + + let tickets = { + let mut epoch_changes = epoch_changes.shared_data(); + + let number = *notification.header.number(); + let position = if number == One::one() { + EpochIdentifierPosition::Genesis1 + } else { + EpochIdentifierPosition::Regular + }; + let mut epoch_identifier = + EpochIdentifier { position, hash: notification.hash, number }; + + let epoch = match epoch_changes.epoch_mut(&mut epoch_identifier) { + Some(epoch) => epoch, + None => { + warn!(target: "sassafras", "🌳 Unexpected missing epoch data for {}", notification.hash); + continue + }, + }; + + authorship::generate_epoch_tickets(epoch, 30, 1, &keystore) + }; + + if tickets.is_empty() { + continue + } + + // Get the best block on which we will build and send the tickets. 
+ let best_id = match select_chain.best_chain().await { + Ok(header) => BlockId::Hash(header.hash()), + Err(err) => { + error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); + continue + }, + }; + + let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { + Err(err) => Some(err.to_string()), + Ok(false) => Some("Unknown reason".to_string()), + _ => None, + }; + if let Some(err) = err { + error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); + // TODO-SASS-P2: on error remove tickets from epoch... + } + } +} + +/// Worker for Sassafras which implements `Future`. This must be polled. +pub struct SassafrasWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: SlotNotificationSinks, +} + +impl SassafrasWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self, + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl Future for SassafrasWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.inner.as_mut().poll(cx) + } +} + +/// Slot notification sinks. 
+type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>>, +>; + +struct SassafrasSlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + config: Config, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SassafrasSlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "sassafras" + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + fn epoch_data( + &self, + parent: &B::Header, + slot: Slot, + ) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + async fn claim_slot( + &self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); + + // Get the next slot ticket from the runtime. + let block_id = BlockId::Hash(parent_header.hash()); + let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; + + // TODO-SASS-P2 + debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); + + let claim = authorship::claim_slot( + slot, + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + })? + .as_ref(), + ticket, + &self.keystore, + ); + + if claim.is_some() { + debug!(target: "sassafras", "🌳 Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); + true + } else { + false + }, + } + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![::sassafras_pre_digest(claim.0.clone())] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges<>::Transaction, B>, + (_, public): Self::Claim, + epoch_descriptor: Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>::Transaction>, + 
sp_consensus::Error, + > { + // Sign the pre-sealed hash of the block and then add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*self.keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? + .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = ::sassafras_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok(import_block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO-SASS-P2 + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO-SASS-P2 + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' + let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &block_proposal_slot_portion, + None, + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) + } +} + +/// Extract the Sassafras pre digest from the given header. Pre-runtime digests are +/// mandatory, the function will return `Err` if none is found. +pub fn find_pre_digest(header: &B::Header) -> Result> { + // Genesis block doesn't contain a pre digest so let's generate a + // dummy one to not break any invariants in the rest of the code + if header.number().is_zero() { + const PROOF: &str = "zero sequence is a valid vrf output/proof; qed"; + let block_vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); + let block_vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); + return Ok(PreDigest { + authority_index: 0, + slot: 0.into(), + block_vrf_output, + block_vrf_proof, + ticket_info: None, + }) + } + + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "sassafras", "🌳 Checking log {:?}, looking for pre runtime digest", log); + match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(sassafras_err(Error::MultiplePreRuntimeDigests)), + (None, _) => trace!(target: "sassafras", "🌳 Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| sassafras_err(Error::NoPreRuntimeDigest)) +} + +/// Extract the Sassafras epoch change digest from the given header, if it exists. 
+fn find_next_epoch_digest( + header: &B::Header, +) -> Result, Error> { + let mut epoch_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "sassafras", "🌳 Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); + match (log, epoch_digest.is_some()) { + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(sassafras_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + _ => trace!(target: "sassafras", "🌳 Ignoring digest not meant for us"), + } + } + + Ok(epoch_digest) +} + +/// State that must be shared between the import queue and the authoring logic. +#[derive(Clone)] +pub struct SassafrasLink { + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl SassafrasLink { + /// Get the epoch changes of this link. + pub fn epoch_changes(&self) -> &SharedEpochChanges { + &self.epoch_changes + } + + /// Get the config of this link. + pub fn config(&self) -> &Config { + &self.config + } +} + +/// A verifier for Sassafras blocks. 
+pub struct SassafrasVerifier { + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + config: Config, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, +} + +impl SassafrasVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, +{ + async fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "sassafras", + "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(Error::RuntimeApi)?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // Don't report any equivocations during initial sync as they are most likely stale. + if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // Check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? 
+ { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // Get the best block on which we will build and send the equivocation report. + let _best_id: BlockId = self + .select_chain + .best_chain() + .await + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // TODO-SASS-P2 + + Ok(()) + } +} + +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; + +#[async_trait::async_trait] +impl Verifier + for SassafrasVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> BlockVerificationResult { + trace!( + target: "sassafras", + "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, + ); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } + + trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); + + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&block.header)?; + + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? 
+ .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) + .ok_or(Error::::FetchEpoch(parent_hash))?; + + let ticket = self + .client + .runtime_api() + .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) + .map_err(|err| err.to_string())?; + + let v_params = verification::VerificationParams { + header: block.header.clone(), + pre_digest, + slot_now, + epoch: viable_epoch.as_ref(), + ticket, + }; + + (verification::check_header::(v_params)?, epoch_descriptor) + }; + + match check_header { + CheckedHeader::Checked(pre_header, verified_info) => { + let sassafras_pre_digest = verified_info + .pre_digest + .as_sassafras_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + let slot = sassafras_pre_digest.slot; + + // The header is valid but let's check if there was something else already + // proposed at the same slot by the given author. If there was, we will + // report the equivocation to the runtime. + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &block.header, + &verified_info.author, + &block.origin, + ) + .await + { + warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. 
+ if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(slot); + let new_block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + block.post_hash = Some(hash); + + Ok((block, Default::default())) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} + +/// A block-import handler for Sassafras. +/// +/// This scans each imported block for epoch change announcements. The announcements are +/// tracked in a tree (of all forks), and the import logic validates all epoch change +/// transitions, i.e. whether a given epoch change is expected or whether it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. 
+pub struct SassafrasBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl Clone for SassafrasBlockImport { + fn clone(&self) -> Self { + SassafrasBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + config: self.config.clone(), + } + } +} + +impl SassafrasBlockImport { + fn new( + client: Arc, + epoch_changes: SharedEpochChanges, + block_import: I, + config: Config, + ) -> Self { + SassafrasBlockImport { client, inner: block_import, epoch_changes, config } + } +} + +#[async_trait::async_trait] +impl BlockImport for SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + + let pre_digest = find_pre_digest::(&block.header).expect( + "valid sassafras headers must contain a predigest; header has been already verified; qed", + ); + let slot = pre_digest.slot; + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or_else(|| { + ConsensusError::ChainLookup( + sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( + "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ + header has already been verified; qed", + ); + + // Make sure that slot number is strictly increasing + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) + } + + // If there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + // Use an extra scope to make the compiler happy, because otherwise he complains about the + // mutex, even if we dropped it... + let mut epoch_changes = { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + + let added_weight = pre_digest.ticket_info.is_some() as u32; + let total_weight = parent_weight + added_weight; + + // Search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.config.genesis_config, slot) + }) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "sassafras", + log_level, + "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "sassafras", + log_level, + "🌳 🍁 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| { + ConsensusError::ClientImport(format!( + "Error importing epoch changes: {}", + e + )) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + *epoch_changes = + old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + "No block weight for parent header.".to_string(), + ) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; + // Release the mutex, but it stays locked + epoch_changes.release_mutex() + }; + + let import_result = self.inner.import_block(block, new_cache).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + if import_result.is_err() { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes.upgrade() = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + if info.block_gap.is_none() { + epoch_changes.clear_gap(); + } + + let finalized_slot = { + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect( + "best finalized hash was given by client; finalized headers must exist in db; qed", + ); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .slot + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(()) +} + +/// Produce a Sassafras block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. 
+pub fn block_import( + config: Config, + wrapped_block_import: I, + client: Arc, +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> +where + C: AuxStore + HeaderBackend + HeaderMetadata + 'static, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + + let link = SassafrasLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; + + // NOTE: this isn't entirely necessary, but since we didn't use to prune the + // epoch tree it is useful as a migration, so that nodes prune long trees on + // startup rather than waiting until importing the next epoch change block. + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; + + let import = SassafrasBlockImport::new(client, epoch_changes, wrapped_block_import, config); + + Ok((import, link)) +} + +/// Start an import queue for the Sassafras consensus algorithm. +/// +/// This method returns the import queue, some data that needs to be passed to the block authoring +/// logic (`SassafrasLink`), and a future that must be run to completion and is responsible for +/// listening to finality notifications and pruning the epoch changes tree. +/// +/// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, +/// otherwise crucial import logic will be omitted. 
+pub fn import_queue( + sassafras_link: SassafrasLink, + block_import: Inner, + justification_import: Option>, + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + spawner: &impl sp_core::traits::SpawnEssentialNamed, + registry: Option<&Registry>, + can_author_with: CAW, + telemetry: Option, +) -> ClientResult> +where + Inner: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, + Client: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + AuxStore + + Send + + Sync + + 'static, + Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, + SelectChain: sp_consensus::SelectChain + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + let verifier = SassafrasVerifier { + select_chain, + create_inherent_data_providers, + config: sassafras_link.config, + epoch_changes: sassafras_link.epoch_changes, + can_author_with, + telemetry, + client, + }; + + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) +} diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs new file mode 100644 index 0000000000000..3c4dbef92f01a --- /dev/null +++ b/client/consensus/sassafras/src/verification.rs @@ -0,0 +1,147 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Verification for Sassafras headers. + +use super::{authorship, sassafras_err, BlockT, Epoch, Error}; +use sc_consensus_slots::CheckedHeader; +use sp_consensus_sassafras::{ + digests::{CompatibleDigestItem, PreDigest}, + make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, Ticket, +}; +use sp_consensus_slots::Slot; +use sp_core::{ByteArray, Pair}; +use sp_runtime::{traits::Header, DigestItem}; + +// Allowed slot drift. +const MAX_SLOT_DRIFT: u64 = 1; + +/// Sassafras verification parameters +pub struct VerificationParams<'a, B: 'a + BlockT> { + /// The header being verified. + pub header: B::Header, + /// The pre-digest of the header being verified. + pub pre_digest: PreDigest, + /// The slot number of the current time. + pub slot_now: Slot, + /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. + pub epoch: &'a Epoch, + /// Expected ticket for this block. + pub ticket: Option, +} + +pub struct VerifiedHeaderInfo { + pub pre_digest: DigestItem, + pub seal: DigestItem, + pub author: AuthorityId, +} + +/// Check a header has been signed by the right key. If the slot is too far in +/// the future, an error will be returned. If successful, returns the pre-header +/// and the digest item containing the seal. +/// +/// The seal must be the last digest. Otherwise, the whole header is considered +/// unsigned. This is required for security and must not be changed. +/// +/// The given header can either be from a primary or secondary slot assignment, +/// with each having different validation logic. 
+pub fn check_header( + params: VerificationParams, +) -> Result, Error> { + let VerificationParams { mut header, pre_digest, slot_now, epoch, ticket } = params; + + // Check that the slot is not in the future, with some drift being allowed. + if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { + return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) + } + + let author = match epoch.authorities.get(pre_digest.authority_index as usize) { + Some(author) => author.0.clone(), + None => return Err(sassafras_err(Error::SlotAuthorNotFound)), + }; + + // Check header signature + + let seal = header + .digest_mut() + .pop() + .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + + let signature = seal + .as_sassafras_seal() + .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; + + let pre_hash = header.hash(); + if !AuthorityPair::verify(&signature, &pre_hash, &author) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) + } + + // Check authorship method and claim + + match (&ticket, &pre_digest.ticket_info) { + (Some(ticket), Some(ticket_info)) => { + log::debug!(target: "sassafras", "🌳 checking primary"); + if ticket_info.authority_index != pre_digest.authority_index { + // TODO-SASS-P2 ... 
we can eventually remove auth index from ticket info + log::error!(target: "sassafras", "🌳 Wrong primary authority index"); + } + let transcript = make_ticket_transcript( + &epoch.randomness, + ticket_info.attempt as u64, + epoch.epoch_index, + ); + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) + .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; + }, + (None, None) => { + log::debug!(target: "sassafras", "🌳 checking secondary"); + let idx = authorship::secondary_authority_index(pre_digest.slot, params.epoch); + if idx != pre_digest.authority_index as u64 { + log::error!(target: "sassafras", "🌳 Wrong secondary authority index"); + } + }, + (Some(_), None) => { + log::warn!(target: "sassafras", "🌳 Unexpected secondary authoring mechanism"); + // TODO-SASS-P2: maybe we can use a different error variant + return Err(Error::UnexpectedAuthoringMechanism) + }, + (None, Some(_)) => { + log::warn!(target: "sassafras", "🌳 Unexpected primary authoring mechanism"); + // TODO-SASS-P2: maybe we will use a different error variant + return Err(Error::UnexpectedAuthoringMechanism) + }, + } + + // Check block-vrf proof + + let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| { + p.vrf_verify(transcript, &pre_digest.block_vrf_output, &pre_digest.block_vrf_proof) + }) + .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; + + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::sassafras_pre_digest(pre_digest), + seal, + author, + }; + + Ok(CheckedHeader::Checked(header, info)) +} diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index a6fbc4bebc796..b5b08d4cb4254 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -488,7 +488,7 @@ pub async fn start_slot_worker( Ok(r) => r, Err(e) => { 
warn!(target: "slots", "Error while polling for next slot: {}", e); - return + break }, }; diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml new file mode 100644 index 0000000000000..1d3839a9dcfb9 --- /dev/null +++ b/frame/sassafras/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "pallet-sassafras" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Consensus extension module for Sassafras consensus." +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +log = { version = "0.4.17", default-features = false } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } +scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } +sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0", 
default-features = false, path = "../../primitives/std" }
+
+[dev-dependencies]
+sp-core = { version = "6.0.0", path = "../../primitives/core" }
+sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" }
+
+[features]
+default = ["std"]
+std = [
+	"scale-codec/std",
+	"frame-benchmarking/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"pallet-session/std",
+	"pallet-timestamp/std",
+	"scale-info/std",
+	"sp-application-crypto/std",
+	"sp-consensus-sassafras/std",
+	"sp-consensus-vrf/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/sassafras/README.md b/frame/sassafras/README.md
new file mode 100644
index 0000000000000..b14c7045f6001
--- /dev/null
+++ b/frame/sassafras/README.md
@@ -0,0 +1,16 @@
+Sassafras
+=========
+
+Consensus extension module for Sassafras consensus.
+
+TODO: protocol description
+
+### References
+
+* [w3f introduction](https://research.web3.foundation/en/latest/polkadot/block-production/SASSAFRAS.html):
+  a fairly friendly overview of the protocol building blocks;
+* [research paper](https://github.com/w3f/research/tree/master/docs/papers/sass)
+  from Web3 foundation;
+* [ring-vrf paper](https://github.com/w3f/ring-vrf/papers/ring_vrf)
+  from Web3 foundation;
+* [zcash zk-snarks](https://arxiv.org/pdf/2008.00881.pdf);
diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs
new file mode 100644
index 0000000000000..06155ec86877d
--- /dev/null
+++ b/frame/sassafras/src/lib.rs
@@ -0,0 +1,710 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Consensus extension module for Sassafras consensus.
+//!
+//! Sassafras is a constant-time block production protocol that aims to ensure that
+//! there is exactly one block produced within each constant time interval, rather
+//! than multiple or none.
+//!
+//! We run a lottery to distribute block production slots in an epoch and to fix the
+//! order validators produce blocks by the beginning of an epoch.
+//!
+//! Each validator signs the same VRF input and publishes the output on-chain. This
+//! value is their lottery ticket that can be validated against their public key.
+//!
+//! We want to keep lottery winners secret, i.e. do not publish their public keys.
+//! At the beginning of the epoch all the validators' tickets are published but not
+//! their public keys.
+//!
+//! A valid ticket is validated when an honest validator reclaims it on block
+//! production.
+//!
+//! To prevent submission of fake tickets, resulting in empty slots, the validator,
+//! when submitting the ticket, accompanies it with a SNARK of the statement: "Here's
+//! my VRF output that has been generated using the given VRF input and my secret
+//! key. I'm not telling you my keys, but my public key is among those of the
+//! nominated validators", that is validated before the lottery.
+//!
+//! To anonymously publish the ticket to the chain a validator sends their ticket
+//! to a random validator who later puts it on-chain as a transaction.
+ +#![deny(warnings)] +#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use scale_codec::{Decode, Encode}; + +use frame_support::{traits::Get, weights::Weight, BoundedBTreeSet, BoundedVec, WeakBoundedVec}; +use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +use sp_application_crypto::ByteArray; +use sp_consensus_vrf::schnorrkel; +use sp_runtime::{ + generic::DigestItem, + traits::{One, Saturating}, + BoundToRuntimeAppPublic, +}; +use sp_std::prelude::Vec; + +pub use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, + AuthorityId, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, + PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, +}; + +// TODO-SASS-P2: tests and benches + +//#[cfg(test)] +//mod mock; +// +//#[cfg(test)] +//mod tests; +// +//#[cfg(feature = "runtime-benchmarks")] +//mod benchmarking; + +pub use pallet::*; + +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); +} + +/// A type signifying to Sassafras that an external trigger for epoch changes +/// (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. +} + +/// A type signifying to Sassafras that it should perform epoch changes with an internal +/// trigger, recycling the same authorities forever. 
+pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: T::BlockNumber) { + if >::should_epoch_change(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_epoch_change(authorities, next_authorities); + } + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The Sassafras pallet. + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// Configuration parameters. + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_timestamp::Config + SendTransactionTypes> { + /// The amount of time, in slots, that each epoch should last. + /// NOTE: Currently it is not possible to change the epoch duration after the chain has + /// started. Attempting to do so will brick block production. + #[pallet::constant] + type EpochDuration: Get; + + /// The expected average block time at which Sassafras should be creating + /// blocks. Since Sassafras is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + #[pallet::constant] + type ExpectedBlockTime: Get; + + /// Sassafras requires some logic to be triggered on every block to query for whether an + /// epoch has ended and to perform the transition to the next epoch. + /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only + /// be used when no other module is responsible for changing authority set. + type EpochChangeTrigger: EpochChangeTrigger; + + /// Max number of authorities allowed + #[pallet::constant] + type MaxAuthorities: Get; + + /// Max number of tickets that are considered for each epoch. 
+ #[pallet::constant] + type MaxTickets: Get; + + /// Max number of tickets that we are going to consider for each epoch. + #[pallet::constant] + type MaxSubmittedTickets: Get; + } + + // TODO-SASS-P2 + /// Sassafras runtime errors. + #[pallet::error] + pub enum Error { + /// Submitted configuration is invalid. + InvalidConfiguration, + // TODO-SASS P2 ... + } + + /// Current epoch index. + #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; + + /// Current epoch authorities. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; + + /// Next epoch authorities. + #[pallet::storage] + pub type NextAuthorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; + + /// The slot at which the first epoch actually started. This is 0 + /// until the first block of the chain. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + /// The epoch randomness for the *current* epoch. + /// + /// # Security + /// + /// This MUST NOT be used for gambling, as it can be influenced by a + /// malicious validator in the short term. It MAY be used in many + /// cryptographic protocols, however, so long as one remembers that this + /// (like everything else on-chain) it is public. For example, it can be + /// used where a number is needed that cannot have been chosen by an + /// adversary, for purposes such as public-coin zero-knowledge proofs. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Next epoch randomness. 
+ #[pallet::storage] + pub type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Current epoch randomness accumulator. + #[pallet::storage] + pub type RandomnessAccumulator = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + #[pallet::storage] + #[pallet::getter(fn initialized)] + pub type Initialized = StorageValue<_, Option>; + + /// The configuration for the current epoch. Should never be `None` as it is initialized in + /// genesis. + #[pallet::storage] + pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + + /// Current session tickets. + #[pallet::storage] + pub type Tickets = StorageValue<_, BoundedVec, ValueQuery>; + + /// Next session tickets. + // TODO-SASS-P2: probably the best thing is to store the tickets in a map + // Each map entry contains a vector of tickets as they are received. + #[pallet::storage] + pub type NextTickets = + StorageValue<_, BoundedBTreeSet, ValueQuery>; + + /// Genesis configuration for Sassafras protocol. + #[cfg_attr(feature = "std", derive(Default))] + #[pallet::genesis_config] + pub struct GenesisConfig { + /// Genesis authorities. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Genesis epoch configuration. 
+ pub epoch_config: Option, + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_genesis_authorities(&self.authorities); + EpochConfig::::put( + self.epoch_config.clone().expect("epoch_config must not be None"), + ); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + /// Block initialization + fn on_initialize(now: BlockNumberFor) -> Weight { + Self::initialize(now); + 0 + } + + /// Block finalization + fn on_finalize(_now: BlockNumberFor) { + // At the end of the block, we can safely include the new VRF output from + // this block into the randomness accumulator. If we've determined + // that this block was the first in a new epoch, the changeover logic has + // already occurred at this point, so the under-construction randomness + // will only contain outputs from the right epoch. + // TODO-SASS-P2: maybe here we can `expect` that is initialized (panic if not) + if let Some(pre_digest) = Initialized::::take().flatten() { + let authority_index = pre_digest.authority_index; + + let randomness: Option = Authorities::::get() + .get(authority_index as usize) + .and_then(|(authority, _)| { + schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() + }) + .and_then(|pubkey| { + let current_slot = CurrentSlot::::get(); + + let transcript = sp_consensus_sassafras::make_slot_transcript( + &Self::randomness(), + current_slot, + EpochIndex::::get(), + ); + + let vrf_output = pre_digest.block_vrf_output; + + // This has already been verified by the client on block import. + debug_assert!(pubkey + .vrf_verify( + transcript.clone(), + &vrf_output, + &pre_digest.block_vrf_proof + ) + .is_ok()); + + vrf_output.0.attach_input_hash(&pubkey, transcript).ok() + }) + .map(|inout| { + inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX) + }); + + // TODO-SASS-P2: this should be infallible. Randomness should be always deposited. + // Eventually better to panic here? 
+ if let Some(randomness) = randomness { + Self::deposit_randomness(&randomness); + } + } + } + } + + #[pallet::call] + impl Pallet { + /// Submit next epoch tickets. + #[pallet::weight(10_000)] + pub fn submit_tickets(origin: OriginFor, tickets: Vec) -> DispatchResult { + ensure_none(origin)?; + + log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); + + // We have to traverse the tickets list one by one to verify the SNARK proofs. + let mut next_tickets = NextTickets::::get(); + + // 1. validate proof + // 2. append to sorted list + // TODO-SASS-P2: use a scattered structure for tickets + next_tickets = next_tickets.try_mutate(|tree| { + for ticket in tickets.iter() { + tree.insert(*ticket); + } + let max_tickets = T::MaxTickets::get() as usize; + if tree.len() > max_tickets { + // Remove the mid values + // TODO-SASS-P2: with the new structure this will be reimplemented... + let diff = tree.len() - max_tickets; + let off = max_tickets / 2; + let val = tree.iter().nth(off).cloned().unwrap(); + let mut mid = tree.split_off(&val); + let val = mid.iter().nth(diff).cloned().unwrap(); + let mut tail = mid.split_off(&val); + tree.append(&mut tail); + log::warn!(target: "sassafras", "🌳 TICKETS OVERFLOW, drop {} tickets... 
(len = {})", diff, tree.len()); + } + }).expect("Tickets list len is within the allowed bounds; qed."); + + NextTickets::::put(next_tickets); + + Ok(()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_tickets { tickets } = call { + // Discard tickets not coming from the local node + log::debug!(target: "sassafras::runtime", "🌳 Validating unsigned from {} source", + match source { + TransactionSource::Local => "local", + TransactionSource::InBlock => "in-block", + TransactionSource::External => "external", + } + ); + + if source == TransactionSource::External { + // TODO-SASS-P2: double check this `Local` requirement... + // If we only allow these txs on block production, then there is less chance to + // submit our tickets if we don't have enough authoring slots. + // If we have 0 slots => we have zero chances. + // Maybe this is one valid reason to introduce proxies. + log::warn!( + target: "sassafras::runtime", + "🌳 Rejecting unsigned transaction from external sources.", + ); + return InvalidTransaction::BadSigner.into() + } + + // Current slot should be less than half of epoch duration. + if Self::current_slot_epoch_index() >= T::EpochDuration::get() / 2 { + log::warn!( + target: "sassafras::runtime", + "🌳 Timeout to propose tickets, bailing out.", + ); + return InvalidTransaction::Stale.into() + } + + // TODO-SASS-P2 more validation steps: + // 1. epoch index + // 2. signed by an authority for current epoch + // 3. single submission attempt from validator? + + ValidTransaction::with_tag_prefix("Sassafras") + // We assign the maximum priority for any equivocation report. + .priority(TransactionPriority::max_value()) + // TODO-SASS-P2: if possible use a more efficient way to distinquish + // duplicates... 
+ .and_provides(tickets) + // TODO-SASS-P2: this should be set such that it is discarded after the first + // half + .longevity(3_u64) + .propagate(true) + .build() + } else { + InvalidTransaction::Call.into() + } + } + } +} + +// Inherent methods +impl Pallet { + /// Determine the Sassafras slot duration based on the Timestamp module configuration. + pub fn slot_duration() -> T::Moment { + // TODO-SASS-P2: clarify why this is doubled (copied verbatim from BABE) + // We double the minimum block-period so each author can always propose within + // the majority of their slot. + ::MinimumPeriod::get().saturating_mul(2u32.into()) + } + + /// Determine whether an epoch change should take place at this block. + /// Assumes that initialization has already taken place. + pub fn should_epoch_change(now: T::BlockNumber) -> bool { + // The epoch has technically ended during the passage of time between this block and the + // last, but we have to "end" the epoch now, since there is no earlier possible block we + // could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having + // started at the slot of block 1. We want to use the same randomness and validator set as + // signalled in the genesis, so we don't rotate the epoch. + now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() + } + + fn current_slot_epoch_index() -> u64 { + Self::slot_epoch_index(CurrentSlot::::get()) + } + + fn slot_epoch_index(slot: Slot) -> u64 { + if *GenesisSlot::::get() == 0 { + return 0 + } + *slot.saturating_sub(Self::current_epoch_start()) + } + + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` + /// has returned `true`, and the caller is the only caller of this function. + /// + /// Typically, this is not handled directly by the user, but by higher-level validator-set + /// manager logic like `pallet-session`. 
+ pub fn enact_epoch_change( + authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, + next_authorities: WeakBoundedVec< + (AuthorityId, SassafrasAuthorityWeight), + T::MaxAuthorities, + >, + ) { + // TODO-SASS-P2: we don't depend on session module... + + // PRECONDITION: caller has done initialization and is guaranteed by the session module to + // be called before this. + debug_assert!(Self::initialized().is_some()); + + // Update epoch index + let epoch_index = EpochIndex::::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + EpochIndex::::put(epoch_index); + + // Update authorities + Authorities::::put(authorities); + NextAuthorities::::put(&next_authorities); + + // Update epoch randomness. + let next_epoch_index = epoch_index + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + // Returns randomness for the current epoch and computes the *next* + // epoch randomness. + let randomness = Self::randomness_change_epoch(next_epoch_index); + Randomness::::put(randomness); + + // // Update the start blocks of the previous and new current epoch. + // >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { + // *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); + // *current_epoch_start_block = >::block_number(); + // }); + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. 
+ + let next_randomness = NextRandomness::::get(); + + let next_epoch = NextEpochDescriptor { + authorities: next_authorities.to_vec(), + randomness: next_randomness, + }; + Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); + + // if let Some(next_config) = NextEpochConfig::::get() { + // EpochConfig::::put(next_config); + // } + + // if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { + // let next_epoch_config: BabeEpochConfiguration = + // pending_epoch_config_change.clone().into(); + // NextEpochConfig::::put(next_epoch_config); + // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); + // } + + Self::enact_tickets(); + } + + /// Enact next epoch tickets list. + /// To work properly this should be done as the last action of the last epoch slot. + /// (i.e. current tickets list is not used at this point) + fn enact_tickets() { + // TODO-SASS-P2: manage skipped epoch by killing both Tickets and NextTickets + + let mut tickets = NextTickets::::get().into_iter().collect::>(); + log::debug!(target: "sassafras", "🌳 @@@@@@@@@ Enacting {} tickets", tickets.len()); + + if tickets.len() > T::MaxTickets::get() as usize { + log::error!(target: "sassafras", "🌳 should never happen..."); + let max = T::MaxTickets::get() as usize; + tickets.truncate(max); + } + let tickets = BoundedVec::::try_from(tickets) + .expect("vector has been eventually truncated; qed"); + + Tickets::::put(tickets); + NextTickets::::kill(); + } + + /// Finds the start slot of the current epoch. Only guaranteed to give correct results after + /// `initialize` of the first block in the chain (as its result is based off of `GenesisSlot`). 
+ pub fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::::get()) + } + + fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF); + + epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() + } + + fn deposit_consensus(new: U) { + let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, new.encode()); + >::deposit_log(log) + } + + fn deposit_randomness(randomness: &schnorrkel::Randomness) { + let mut s = RandomnessAccumulator::::get().to_vec(); + s.extend_from_slice(randomness); + let accumulator = sp_io::hashing::blake2_256(&s); + RandomnessAccumulator::::put(accumulator); + } + + // Initialize authorities on genesis phase. + // TODO-SASS-P2: temporary fix to make the compiler happy + #[allow(dead_code)] + fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { + if !authorities.is_empty() { + assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); + } + } + + fn initialize_genesis_epoch(genesis_slot: Slot) { + GenesisSlot::::put(genesis_slot); + debug_assert_ne!(*GenesisSlot::::get(), 0); + + // Deposit a log because this is the first block in epoch #0. We use the same values + // as genesis because we haven't collected any randomness yet. 
+ let next = NextEpochDescriptor { + authorities: Self::authorities().to_vec(), + randomness: Self::randomness(), + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)); + } + + fn initialize(now: T::BlockNumber) { + // Since `initialize` can be called twice (e.g. if session module is present) + // let's ensure that we only do the initialization once per block + if Self::initialized().is_some() { + return + } + + let pre_digest = >::digest() + .logs + .iter() + .filter_map(|s| s.as_pre_runtime()) + .filter_map(|(id, mut data)| { + if id == SASSAFRAS_ENGINE_ID { + PreDigest::decode(&mut data).ok() + } else { + None + } + }) + .next(); + + // TODO-SASS-P2: maybe here we have to assert! the presence of pre_digest... + // Every valid sassafras block should come with a pre-digest + + if let Some(ref pre_digest) = pre_digest { + // The slot number of the current block being initialized + let current_slot = pre_digest.slot; + + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + Self::initialize_genesis_epoch(current_slot) + } + + CurrentSlot::::put(current_slot); + } + + Initialized::::put(pre_digest); + + // enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now); + } + + /// Call this function exactly once when an epoch changes, to update the randomness. + /// Returns the new randomness. 
+ fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { + let this_randomness = NextRandomness::::get(); + let accumulator = RandomnessAccumulator::::get(); + + let mut s = Vec::with_capacity(2 * this_randomness.len() + 8); + s.extend_from_slice(&this_randomness); + s.extend_from_slice(&next_epoch_index.to_le_bytes()); + s.extend_from_slice(&accumulator); + + let next_randomness = sp_io::hashing::blake2_256(&s); + NextRandomness::::put(&next_randomness); + + this_randomness + } + + /// Fetch expected ticket for the given slot. + // TODO-SASS-P2: This is a very inefficient and temporary solution. + // On refactory we will come up with a better solution (like a scattered vector). + pub fn slot_ticket(slot: Slot) -> Option { + let duration = T::EpochDuration::get(); + let slot_idx = Self::slot_epoch_index(slot); // % duration; + + // Given a list of ordered tickets: t0, t1, t2, ..., tk to be assigned to N slots (N>k) + // The tickets are assigned to the slots in the following order: t1, t3, ..., t4, t2, t0. + + let ticket_index = |slot_idx| { + let ticket_idx = if slot_idx < duration / 2 { + 2 * slot_idx + 1 + } else { + 2 * (duration - (slot_idx + 1)) + }; + log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); + ticket_idx as usize + }; + + // If this is a ticket for an epoch not enacted yet we have to fetch it from the + // `NextTickets` list. For example, this may happen when an author request the first + // ticket of a new epoch. + if slot_idx < duration { + let tickets = Tickets::::get(); + let idx = ticket_index(slot_idx); + tickets.get(idx).cloned() + } else { + let tickets = NextTickets::::get(); + // Do not use modulus since we want to eventually return `None` for slots crossing the + // epoch boundaries. + let idx = ticket_index(slot_idx - duration); + tickets.iter().nth(idx).cloned() + } + } + + /// Submit next epoch validator tickets via an unsigned extrinsic. 
+ pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); + let call = Call::submit_tickets { tickets }; + SubmitTransaction::>::submit_unsigned_transaction(call.into()).is_ok() + } +} + +impl BoundToRuntimeAppPublic for Pallet { + type Public = AuthorityId; +} diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..08d089c4b7682 --- /dev/null +++ b/primitives/consensus/sassafras/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "sp-consensus-sassafras" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Primitives for Sassafras consensus" +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sp-consensus-sassafras" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +async-trait = { version = "0.1.50", optional = true } +scale-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +merlin = { version = "2.0", default-features = false } +scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +serde = { version = "1.0.136", features = ["derive"], optional = true } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } +sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../application-crypto" } +sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } +sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../vrf" } +sp-core = { version = "6.0.0", default-features = false, path = "../../core" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } +sp-keystore = { version = "0.12.0", 
default-features = false, optional = true, path = "../../keystore" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../std" } + sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } + +[features] +default = ["std"] +std = [ + "async-trait", + "merlin/std", + "scale-codec/std", + "scale-info/std", + "serde", + "sp-api/std", + "sp-application-crypto/std", + "sp-consensus-slots/std", + "sp-consensus-vrf/std", + "sp-core/std", + "sp-inherents/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std", + "sp-timestamp", +] diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs new file mode 100644 index 0000000000000..68116c6b91f70 --- /dev/null +++ b/primitives/consensus/sassafras/src/digests.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Private implementation details of Sassafras digests. 
+ +use super::{ + AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, Slot, TicketInfo, + SASSAFRAS_ENGINE_ID, +}; + +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; +use sp_runtime::{DigestItem, RuntimeDebug}; +use sp_std::vec::Vec; + +/// Sassafras primary slot assignment pre-digest. +#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct PreDigest { + /// Validator index. + pub authority_index: AuthorityIndex, + /// Corresponding slot number. + pub slot: Slot, + /// Block VRF output. + pub block_vrf_output: VRFOutput, + /// Block VRF proof. + pub block_vrf_proof: VRFProof, + /// Ticket information. + pub ticket_info: Option, +} + +/// Information about the next epoch. This is broadcast in the first block +/// of the epoch. +#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +pub struct NextEpochDescriptor { + /// The authorities. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// The value of randomness to use for the slot-assignment. + pub randomness: Randomness, +} + +/// An consensus log item for BABE. +#[derive(Decode, Encode, Clone, PartialEq, Eq)] +pub enum ConsensusLog { + /// The epoch has changed. This provides information about the _next_ + /// epoch - information about the _current_ epoch (i.e. the one we've just + /// entered) should already be available earlier in the chain. + #[codec(index = 1)] + NextEpochData(NextEpochDescriptor), + /// Disable the authority with given index. + #[codec(index = 2)] + OnDisabled(AuthorityIndex), +} + +/// A digest item which is usable with Sassafras consensus. +pub trait CompatibleDigestItem: Sized { + /// Construct a digest item which contains a Sassafras pre-digest. + fn sassafras_pre_digest(seal: PreDigest) -> Self; + + /// If this item is an Sassafras pre-digest, return it. 
+ fn as_sassafras_pre_digest(&self) -> Option; + + /// Construct a digest item which contains a Sassafras seal. + fn sassafras_seal(signature: AuthoritySignature) -> Self; + + /// If this item is a Sassafras signature, return the signature. + fn as_sassafras_seal(&self) -> Option; +} + +impl CompatibleDigestItem for DigestItem { + fn sassafras_pre_digest(digest: PreDigest) -> Self { + DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, digest.encode()) + } + + fn as_sassafras_pre_digest(&self) -> Option { + self.pre_runtime_try_to(&SASSAFRAS_ENGINE_ID) + } + + fn sassafras_seal(signature: AuthoritySignature) -> Self { + DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) + } + + fn as_sassafras_seal(&self) -> Option { + self.seal_try_to(&SASSAFRAS_ENGINE_ID) + } +} diff --git a/primitives/consensus/sassafras/src/inherents.rs b/primitives/consensus/sassafras/src/inherents.rs new file mode 100644 index 0000000000000..6af6e4b4732c6 --- /dev/null +++ b/primitives/consensus/sassafras/src/inherents.rs @@ -0,0 +1,102 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Inherents for Sassafras + +use sp_inherents::{Error, InherentData, InherentIdentifier}; +use sp_std::result::Result; + +/// The Sassafras inherent identifier. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot"; + +/// The type of the Sassafras inherent. 
+pub type InherentType = sp_consensus_slots::Slot; + +/// Auxiliary trait to extract Sassafras inherent data. +pub trait SassafrasInherentData { + /// Get Sassafras inherent data. + fn sassafras_inherent_data(&self) -> Result, Error>; + /// Replace Sassafras inherent data. + fn sassafras_replace_inherent_data(&mut self, new: InherentType); +} + +impl SassafrasInherentData for InherentData { + fn sassafras_inherent_data(&self) -> Result, Error> { + self.get_data(&INHERENT_IDENTIFIER) + } + + fn sassafras_replace_inherent_data(&mut self, new: InherentType) { + self.replace_data(INHERENT_IDENTIFIER, &new); + } +} + +/// Provides the slot duration inherent data for Sassafras. +// TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 +#[cfg(feature = "std")] +pub struct InherentDataProvider { + slot: InherentType, +} + +#[cfg(feature = "std")] +impl InherentDataProvider { + /// Create new inherent data provider from the given `slot`. + pub fn new(slot: InherentType) -> Self { + Self { slot } + } + + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`. + pub fn from_timestamp_and_slot_duration( + timestamp: sp_timestamp::Timestamp, + slot_duration: sp_consensus_slots::SlotDuration, + ) -> Self { + let slot = InherentType::from_timestamp(timestamp, slot_duration); + + Self { slot } + } + + /// Returns the `slot` of this inherent data provider. 
+ pub fn slot(&self) -> InherentType { + self.slot + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; + + fn deref(&self) -> &Self::Target { + &self.slot + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) + } + + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + // There is no error anymore + None + } +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..0546c99c52984 --- /dev/null +++ b/primitives/consensus/sassafras/src/lib.rs @@ -0,0 +1,192 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives for Sassafras +//! 
TODO-SASS-P2 : write proper docs + +#![deny(warnings)] +#![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] +#![cfg_attr(not(feature = "std"), no_std)] + +pub use merlin::Transcript; + +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +#[cfg(feature = "std")] +use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; +use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use sp_std::vec::Vec; + +pub use sp_consensus_slots::{Slot, SlotDuration}; +pub use sp_consensus_vrf::schnorrkel::{ + Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, +}; + +/// Key type for Sassafras module. +pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + +pub mod digests; +pub mod inherents; + +mod app { + use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; + app_crypto!(sr25519, SASSAFRAS); +} + +/// The index of an authority. +pub type AuthorityIndex = u32; + +/// The prefix used by Sassafras for its ticket VRF keys. +pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf"; + +/// The prefix used by Sassafras for its post-block VRF keys. +pub const SASSAFRAS_BLOCK_VRF_PREFIX: &[u8] = b"substrate-sassafras-block-vrf"; + +/// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in +/// the main Sassafras module. If that ever changes, then this must, too. +#[cfg(feature = "std")] +pub type AuthorityPair = app::Pair; + +/// Sassafras authority signature. +pub type AuthoritySignature = app::Signature; + +/// Sassafras authority identifier. Necessarily equivalent to the schnorrkel public key used in +/// the main Sassafras module. If that ever changes, then this must, too. +pub type AuthorityId = app::Public; + +/// The `ConsensusEngineId` of BABE. 
+pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; + +/// The length of the public key +pub const PUBLIC_KEY_LENGTH: usize = 32; + +/// The weight of an authority. +// NOTE: we use a unique name for the weight to avoid conflicts with other +// `Weight` types, since the metadata isn't able to disambiguate. +pub type SassafrasAuthorityWeight = u64; + +/// Weight of a Sassafras block. +/// Primary blocks have a weight of 1 whereas secondary blocks have a weight of 0. +pub type SassafrasBlockWeight = u32; + +/// Configuration data used by the Sassafras consensus engine. +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] +pub struct SassafrasGenesisConfiguration { + /// The slot duration in milliseconds for Sassafras. + pub slot_duration: u64, + /// The duration of epochs in slots. + pub epoch_length: u64, + /// The authorities for the genesis epoch. + pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// The randomness for the genesis epoch. + pub randomness: Randomness, +} + +/// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct SassafrasEpochConfiguration { + // TODO-SASS-P2 + // x: redundancy_factor + // a: attempts number + // L: bound on aa number of tickets that can be gossiped +} + +/// Ticket type. +pub type Ticket = VRFOutput; + +/// Ticket information. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketInfo { + /// Authority index. + pub authority_index: u32, + /// Attempt number. + pub attempt: u32, + /// Ticket proof. + pub proof: VRFProof, +} + +/// Make slot VRF transcript. 
+pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_u64(b"slot number", *slot); + transcript.append_u64(b"current epoch", epoch); + transcript.append_message(b"chain randomness", &randomness[..]); + transcript +} + +/// Make slot VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_slot_transcript_data( + randomness: &Randomness, + slot: Slot, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + ("slot number", VRFTranscriptValue::U64(*slot)), + ("current epoch", VRFTranscriptValue::U64(epoch)), + ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} + +/// Make ticket VRF transcript. +pub fn make_ticket_transcript(randomness: &[u8], attempt: u64, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(b"type", b"ticket"); + transcript.append_u64(b"attempt", attempt); + transcript.append_u64(b"current epoch", epoch); + transcript.append_message(b"chain randomness", randomness); + transcript +} + +/// Make ticket VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_ticket_transcript_data( + randomness: &[u8], + attempt: u64, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + ("type", VRFTranscriptValue::Bytes(b"ticket".to_vec())), + ("attempt", VRFTranscriptValue::U64(attempt)), + ("current epoch", VRFTranscriptValue::U64(epoch)), + ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} + +sp_api::decl_runtime_apis! { + /// API necessary for block authorship with Sassafras. + pub trait SassafrasApi { + /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. 
+ fn configuration() -> SassafrasGenesisConfiguration; + + /// Submit next epoch validator tickets via an unsigned extrinsic. + /// This method returns `false` when creation of the extrinsics fails. + fn submit_tickets_unsigned_extrinsic(tickets: Vec<Ticket>) -> bool; + + /// Get expected ticket for the given slot. + fn slot_ticket(slot: Slot) -> Option<Ticket>; + } +} diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 8666de6c4bc0c..c271019d24041 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -27,7 +27,7 @@ use sp_std::{ }; pub use schnorrkel::{ - vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + vrf::{VRFInOut, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, PublicKey, SignatureError, }; @@ -35,7 +35,7 @@ pub use schnorrkel::{ pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; /// VRF output type available for `std` environment, suitable for schnorrkel operations. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); impl Deref for VRFOutput { diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 80b44449dbac1..7004e8b253ea4 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -1107,6 +1107,8 @@ pub mod key_types { /// Key type for Babe module, built-in. Identified as `babe`. pub const BABE: KeyTypeId = KeyTypeId(*b"babe"); + /// Key type for Sassafras module, built-in. Identified as `sass`. + pub const SASSAFRAS: KeyTypeId = KeyTypeId(*b"sass"); /// Key type for Grandpa module, built-in. Identified as `gran`. pub const GRANDPA: KeyTypeId = KeyTypeId(*b"gran"); /// Key type for controlling an account in a Substrate runtime, built-in. Identified as `acco`.
diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs index 7409353afe9f4..f0c6d7b68a691 100644 --- a/primitives/keystore/src/vrf.rs +++ b/primitives/keystore/src/vrf.rs @@ -31,6 +31,7 @@ pub enum VRFTranscriptValue { /// Value is a u64 integer U64(u64), } + /// VRF Transcript data #[derive(Clone, Encode)] pub struct VRFTranscriptData { @@ -39,6 +40,7 @@ pub struct VRFTranscriptData { /// Additional data to be registered into the transcript pub items: Vec<(&'static str, VRFTranscriptValue)>, } + /// VRF signature data pub struct VRFSignature { /// The VRFOutput serialized From b8cea2ff64c528002306403f4cdf59994d5ffc1b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 19:01:35 +0200 Subject: [PATCH 02/62] Sassafras consensus - Prototype 2.1 (#11889) * Code refactory, e.g. remove most of Babe duplicated code. * Improved tickets management strategy (double buffering). * Validators and configuration change. * Session pallet integration. --- Cargo.lock | 4 +- bin/node-sassafras/node/Cargo.toml | 12 +- bin/node-sassafras/node/src/chain_spec.rs | 75 +- bin/node-sassafras/node/src/cli.rs | 4 + bin/node-sassafras/node/src/command.rs | 26 + bin/node-sassafras/node/src/service.rs | 6 +- bin/node-sassafras/runtime/Cargo.toml | 7 +- bin/node-sassafras/runtime/src/lib.rs | 116 +- client/consensus/sassafras/src/authorship.rs | 549 +++++-- client/consensus/sassafras/src/aux_schema.rs | 2 +- .../consensus/sassafras/src/block_import.rs | 368 +++++ client/consensus/sassafras/src/lib.rs | 1286 ++--------------- .../consensus/sassafras/src/verification.rs | 331 ++++- frame/sassafras/Cargo.toml | 3 +- frame/sassafras/src/benchmarking.rs | 54 + frame/sassafras/src/lib.rs | 646 +++++---- frame/sassafras/src/mock.rs | 231 +++ frame/sassafras/src/session.rs | 114 ++ frame/sassafras/src/tests.rs | 414 ++++++ primitives/consensus/sassafras/src/digests.rs | 16 +- primitives/consensus/sassafras/src/lib.rs | 125 +- 
primitives/consensus/sassafras/src/vrf.rs | 92 ++ 22 files changed, 2733 insertions(+), 1748 deletions(-) create mode 100644 client/consensus/sassafras/src/block_import.rs create mode 100644 frame/sassafras/src/benchmarking.rs create mode 100644 frame/sassafras/src/mock.rs create mode 100644 frame/sassafras/src/session.rs create mode 100644 frame/sassafras/src/tests.rs create mode 100644 primitives/consensus/sassafras/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index f1819c85d7bef..cf1eb38e6d2fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4935,8 +4935,8 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-grandpa", - "pallet-randomness-collective-flip", "pallet-sassafras", + "pallet-session", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -6253,6 +6253,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "log", "pallet-session", "pallet-timestamp", @@ -6260,7 +6261,6 @@ dependencies = [ "scale-info", "sp-application-crypto", "sp-consensus-sassafras", - "sp-consensus-vrf", "sp-core", "sp-io", "sp-runtime", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index c99e7bf5ef0ed..9133c2141c837 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -66,7 +66,15 @@ substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build [features] default = [] -runtime-benchmarks = ["node-sassafras-runtime/runtime-benchmarks"] +runtime-benchmarks = [ + "node-sassafras-runtime/runtime-benchmarks" +] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-sassafras-runtime/try-runtime", "try-runtime-cli"] +try-runtime = [ + "node-sassafras-runtime/try-runtime", + "try-runtime-cli" +] +use-session-pallet = [ + "node-sassafras-runtime/use-session-pallet" +] diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index ed189a6964976..965fc197277c8 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -2,14 +2,17 @@ use node_sassafras_runtime::{ AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; +#[cfg(feature = "use-session-pallet")] +use node_sassafras_runtime::{SessionConfig, SessionKeys}; use sc_service::ChainType; -use sp_consensus_sassafras::AuthorityId as SassafrasId; +use sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; -// The URL for the telemetry server. -// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +// Genesis constants for Sassafras parameters configuration. +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 32; +const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; @@ -23,7 +26,7 @@ pub fn get_from_seed(seed: &str) -> ::Pu type AccountPublic = ::Signer; -/// Generate an account ID from seed. +/// Generate an account id from seed. pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic: From<::Public>, @@ -31,47 +34,40 @@ where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Generate authority keys from seed. 
-pub fn authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { - (get_from_seed::(s), get_from_seed::(s)) +/// Generate authority account id and keys from seed. +pub fn authority_keys_from_seed(seed: &str) -> (AccountId, SassafrasId, GrandpaId) { + ( + get_account_id_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + ) } pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Development", - // ID "dev", ChainType::Development, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, None, - // Properties None, - // Extensions None, )) } @@ -80,19 +76,14 @@ pub fn local_testnet_config() -> Result { let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( - // Name "Local Testnet", - // ID "local_testnet", ChainType::Local, move || { testnet_genesis( wasm_binary, - // Initial PoA authorities vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], - // Sudo account get_account_id_from_seed::("Alice"), - // Pre-funded accounts vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -107,19 +98,13 @@ pub fn local_testnet_config() -> Result { get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], - true, ) }, - // Bootnodes vec![], - // Telemetry None, - // Protocol ID None, - // Properties None, None, - // Extensions None, )) } @@ -127,10 +112,9 @@ pub fn local_testnet_config() -> Result { /// Configure initial 
storage state for FRAME modules. fn testnet_genesis( wasm_binary: &[u8], - initial_authorities: Vec<(SassafrasId, GrandpaId)>, + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, - _enable_println: bool, ) -> GenesisConfig { GenesisConfig { system: SystemConfig { @@ -141,18 +125,39 @@ fn testnet_genesis( // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), }, - sassafras: SassafrasConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone(), 0)).collect(), - epoch_config: Some(node_sassafras_runtime::SASSAFRAS_GENESIS_EPOCH_CONFIG), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 0)).collect(), + epoch_config: SassafrasEpochConfiguration { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, }, grandpa: GrandpaConfig { - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect(), }, sudo: SudoConfig { // Assign network admin rights. 
key: Some(root_key), }, transaction_payment: Default::default(), + #[cfg(feature = "use-session-pallet")] + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + SessionKeys { sassafras: x.1.clone(), grandpa: x.2.clone() }, + ) + }) + .collect::>(), + }, } } diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs index bb2ffa1938107..4ab4d34210c98 100644 --- a/bin/node-sassafras/node/src/cli.rs +++ b/bin/node-sassafras/node/src/cli.rs @@ -36,6 +36,10 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), + /// Sub-commands concerned with benchmarking. + #[clap(subcommand)] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// Try some command against runtime state. #[cfg(feature = "try-runtime")] TryRuntime(try_runtime_cli::TryRuntimeCmd), diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index cf17c37968f54..74ac7dc809802 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -3,6 +3,7 @@ use crate::{ cli::{Cli, Subcommand}, service, }; +use frame_benchmarking_cli::BenchmarkCmd; use node_sassafras_runtime::Block; use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -102,6 +103,31 @@ pub fn run() -> sc_cli::Result<()> { Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) }) }, + Some(Subcommand::Benchmark(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + // This switch needs to be in the client, since the client decides + // which sub-commands it wants to support. + match cmd { + BenchmarkCmd::Pallet(cmd) => { + if !cfg!(feature = "runtime-benchmarks") { + return Err( + "Runtime benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ .into(), + ) + } + + cmd.run::(config) + }, + _ => { + println!("Not implemented..."); + Ok(()) + }, + } + }) + }, #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index ec8f10c1a59b1..33f66262c6dda 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -116,12 +116,12 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( - sc_consensus_sassafras::Config::get(&*client)?, + sc_consensus_sassafras::configuration(&*client)?, grandpa_block_import, client.clone(), )?; - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let import_queue = sc_consensus_sassafras::import_queue( sassafras_link.clone(), @@ -269,7 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let slot_duration = sassafras_link.config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration(); let sassafras_config = sc_consensus_sassafras::SassafrasParams { client: client.clone(), diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 233d9e0e14bbb..823e1dc2bd4eb 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -17,9 +17,9 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = 
"../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } @@ -62,8 +62,8 @@ std = [ "pallet-sassafras/std", "pallet-balances/std", "pallet-grandpa/std", - "pallet-randomness-collective-flip/std", "pallet-sudo/std", + "pallet-session/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -87,6 +87,7 @@ runtime-benchmarks = [ "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -96,8 +97,8 @@ try-runtime = [ "frame-system/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", - "pallet-randomness-collective-flip/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", ] +use-session-pallet = [] diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c458605375ab1..c428931e99dbe 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -6,41 +6,34 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use pallet_grandpa::{ - fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, -}; use sp_api::impl_runtime_apis; use 
sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(feature = "use-session-pallet")] +use sp_runtime::traits::OpaqueKeys; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + ApplyExtrinsicResult, MultiSignature, Perbill, }; use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -// A few exports that help ease life for downstream crates. -pub use frame_support::{ +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use frame_support::{ construct_runtime, parameter_types, - traits::{ - ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, - }, + traits::{ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem}, weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - IdentityFee, Weight, + constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + IdentityFee, }, - StorageValue, }; -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::CurrencyAdapter; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; /// An index to a block. pub type BlockNumber = u32; @@ -79,12 +72,12 @@ pub mod opaque { pub type Block = generic::Block; /// Opaque block identifier type. pub type BlockId = generic::BlockId; +} - impl_opaque_keys! { - pub struct SessionKeys { - pub sassafras: Sassafras, - pub grandpa: Grandpa, - } +impl_opaque_keys! 
{ + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, } } @@ -135,12 +128,6 @@ pub const DAYS: BlockNumber = HOURS * 24; pub const MAX_AUTHORITIES: u32 = 32; -/// The Sassafras epoch configuration at genesis. -pub const SASSAFRAS_GENESIS_EPOCH_CONFIG: sp_consensus_sassafras::SassafrasEpochConfiguration = - sp_consensus_sassafras::SassafrasEpochConfiguration { - // TODO-SASS-P2 - }; - /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -222,8 +209,6 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl pallet_randomness_collective_flip::Config for Runtime {} - parameter_types! { pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -232,11 +217,12 @@ parameter_types! { impl pallet_sassafras::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; + #[cfg(feature = "use-session-pallet")] + type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + #[cfg(not(feature = "use-session-pallet"))] type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; type MaxAuthorities = ConstU32; type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; - // TODO-SASS-P4. Add some redundancy before starting tickets drop. - type MaxSubmittedTickets = ConstU32<{ 3 * EPOCH_DURATION_IN_SLOTS as u32 }>; } impl pallet_grandpa::Config for Runtime { @@ -255,7 +241,6 @@ impl pallet_grandpa::Config for Runtime { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. 
type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; @@ -263,13 +248,11 @@ impl pallet_timestamp::Config for Runtime { } impl pallet_balances::Config for Runtime { + type Event = Event; type MaxLocks = ConstU32<50>; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; - /// The type for recording an account's balance. type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; type DustRemoval = (); type ExistentialDeposit = ConstU128<500>; type AccountStore = System; @@ -290,7 +273,40 @@ impl pallet_sudo::Config for Runtime { type Call = Call; } -// Create the runtime by composing the FRAME pallets that were previously configured. +#[cfg(feature = "use-session-pallet")] +impl pallet_session::Config for Runtime { + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); //pallet_staking::StashOf; + type ShouldEndSession = Sassafras; + type NextSessionRotation = Sassafras; + type SessionManager = (); //pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = pallet_session::weights::SubstrateWeight; +} + +// Create a runtime using session pallet +#[cfg(feature = "use-session-pallet")] +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + Session: pallet_session, + } +); + +// Create a runtime NOT using session pallet +#[cfg(not(feature = "use-session-pallet"))] construct_runtime!( pub enum Runtime where Block = Block, @@ -298,7 +314,6 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system, - RandomnessCollectiveFlip: pallet_randomness_collective_flip, Timestamp: 
pallet_timestamp, Sassafras: pallet_sassafras, Grandpa: pallet_grandpa, @@ -310,10 +325,13 @@ construct_runtime!( /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; + /// Block header type as expected by this runtime. pub type Header = generic::Header; + /// Block type as expected by this runtime. pub type Block = generic::Block; + /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckNonZeroSender, @@ -325,10 +343,13 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; + /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -349,6 +370,8 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] + [pallet_grandpa, Grandpa] + [pallet_sassafras, Sassafras] ); } @@ -411,12 +434,13 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasGenesisConfiguration { - sp_consensus_sassafras::SassafrasGenesisConfiguration { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + sp_consensus_sassafras::SassafrasConfiguration { slot_duration: Sassafras::slot_duration(), - epoch_length: EpochDuration::get(), - genesis_authorities: Sassafras::authorities().to_vec(), + epoch_duration: EpochDuration::get(), + authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), + threshold_params: Sassafras::config(), } } @@ -433,13 +457,13 @@ impl_runtime_apis! 
{ impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { - opaque::SessionKeys::generate(seed) + SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec, ) -> Option, KeyTypeId)>> { - opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + SessionKeys::decode_into_raw_public_keys(&encoded) } } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index c8f39497ffa5e..8f1aa1115d2a5 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,35 +16,32 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Sassafras authority selection and slot claiming. +//! Types and functions related to authority selection and slot claiming. -use crate::Epoch; +use super::*; -use scale_codec::Encode; -use sp_application_crypto::AppKey; use sp_consensus_sassafras::{ - digests::PreDigest, make_slot_transcript_data, make_ticket_transcript_data, AuthorityId, Slot, - Ticket, TicketInfo, SASSAFRAS_TICKET_VRF_PREFIX, + digests::PreDigest, + vrf::{make_slot_transcript_data, make_ticket_transcript_data}, + AuthorityId, Slot, Ticket, TicketInfo, }; -use sp_consensus_vrf::schnorrkel::{PublicKey, VRFInOut, VRFOutput, VRFProof}; use sp_core::{twox_64, ByteArray}; -use sp_keystore::{vrf::make_transcript, SyncCryptoStore, SyncCryptoStorePtr}; /// Get secondary authority index for the given epoch and slot. -#[inline] -pub fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> u64 { - u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) % - epoch.authorities.len() as u64 +pub(crate) fn secondary_authority_index(slot: Slot, config: &SassafrasConfiguration) -> u64 { + u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) % + config.authorities.len() as u64 } /// Try to claim an epoch slot. 
/// If ticket is `None`, then the slot should be claimed using the fallback mechanism. -pub fn claim_slot( +fn claim_slot( slot: Slot, epoch: &Epoch, ticket: Option, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { + let config = &epoch.config; let (authority_index, ticket_info) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); @@ -56,97 +53,61 @@ pub fn claim_slot( }, None => { log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); - (secondary_authority_index(slot, epoch), None) + (secondary_authority_index(slot, config), None) }, }; - let authority_id = epoch.authorities.get(authority_index as usize).map(|auth| &auth.0)?; + let authority_id = config.authorities.get(authority_index as usize).map(|auth| &auth.0)?; - let transcript_data = make_slot_transcript_data(&epoch.randomness, slot, epoch.epoch_index); - let result = SyncCryptoStore::sr25519_vrf_sign( + let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_index); + let signature = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, authority_id.as_ref(), transcript_data, - ); - - match result { - Ok(Some(signature)) => { - let pre_digest = PreDigest { - authority_index: authority_index as u32, - slot, - block_vrf_output: VRFOutput(signature.output), - block_vrf_proof: VRFProof(signature.proof.clone()), - ticket_info, - }; - Some((pre_digest, authority_id.clone())) - }, - _ => None, - } -} - -/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: -/// - x: redundancy factor; -/// - s: number of slots in epoch; -/// - a: max number of attempts; -/// - v: number of validator in epoch. -/// The parameters should be chosen such that T <= 1. -/// If `attempts * validators` is zero then we fallback to T = 0 -// TODO-SASS-P3: this formula must be double-checked... 
-#[inline] -fn calculate_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> u128 { - let den = attempts as u128 * validators as u128; - let num = redundancy as u128 * slots as u128; - let res = u128::MAX.checked_div(den).unwrap_or(0).saturating_mul(num); + ) + .ok() + .flatten()?; - // TODO-SASS-P4 remove me - log::debug!( - target: "sassafras", - "🌳 Tickets threshold: {} {:016x}", num as f64 / den as f64, res, - ); - res -} + let pre_digest = PreDigest { + authority_index: authority_index as u32, + slot, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof.clone()), + ticket_info, + }; -/// Returns true if the given VRF output is lower than the given threshold, false otherwise. -#[inline] -pub fn check_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(SASSAFRAS_TICKET_VRF_PREFIX)) < threshold + Some((pre_digest, authority_id.clone())) } /// Generate the tickets for the given epoch. /// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` /// structure. The additional information will be used during epoch to claim slots. 
-pub fn generate_epoch_tickets( - epoch: &mut Epoch, - max_attempts: u32, - redundancy_factor: u32, - keystore: &SyncCryptoStorePtr, -) -> Vec { +pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { + let config = &epoch.config; + let max_attempts = config.threshold_params.attempts_number; + let redundancy_factor = config.threshold_params.redundancy_factor; let mut tickets = vec![]; - let threshold = calculate_threshold( + let threshold = sp_consensus_sassafras::compute_threshold( redundancy_factor, - epoch.duration as u32, + config.epoch_duration as u32, max_attempts, - epoch.authorities.len() as u32, + config.authorities.len() as u32, ); + // TODO-SASS-P4 remove me + log::debug!(target: "sassafras", "🌳 Tickets threshold: {:032x}", threshold); - let authorities = epoch.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); + let authorities = config.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); for (authority_index, authority_id) in authorities { - let raw_key = authority_id.to_raw_vec(); - - if !SyncCryptoStore::has_keys(&**keystore, &[(raw_key.clone(), AuthorityId::ID)]) { + if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) + { continue } - let public = match PublicKey::from_bytes(&raw_key) { - Ok(public) => public, - Err(_) => continue, - }; - - let get_ticket = |attempt| { + let make_ticket = |attempt| { let transcript_data = - make_ticket_transcript_data(&epoch.randomness, attempt as u64, epoch.epoch_index); + make_ticket_transcript_data(&config.randomness, attempt, epoch.epoch_index); // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. 
@@ -158,13 +119,11 @@ pub fn generate_epoch_tickets( ) .ok()??; - let transcript = make_transcript(transcript_data); - let inout = signature.output.attach_input_hash(&public, transcript).ok()?; - if !check_threshold(&inout, threshold) { + let ticket = VRFOutput(signature.output); + if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { return None } - let ticket = VRFOutput(signature.output); let ticket_info = TicketInfo { attempt: attempt as u32, authority_index: authority_index as u32, @@ -175,7 +134,7 @@ pub fn generate_epoch_tickets( }; for attempt in 0..max_attempts { - if let Some((ticket, ticket_info)) = get_ticket(attempt) { + if let Some((ticket, ticket_info)) = make_ticket(attempt) { tickets.push(ticket); epoch.tickets_info.insert(ticket, ticket_info); } @@ -183,3 +142,427 @@ pub fn generate_epoch_tickets( } tickets } + +struct SassafrasSlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + genesis_config: SassafrasConfiguration, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SassafrasSlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "sassafras" + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + 
fn epoch_data( + &self, + parent: &B::Header, + slot: Slot, + ) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .map(|epoch| epoch.as_ref().config.authorities.len()) + } + + async fn claim_slot( + &self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); + + // Get the next slot ticket from the runtime. + let block_id = BlockId::Hash(parent_header.hash()); + let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; + + // TODO-SASS-P2 + debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); + + let claim = authorship::claim_slot( + slot, + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))? 
+ .as_ref(), + ticket, + &self.keystore, + ); + if claim.is_some() { + debug!(target: "sassafras", "🌳 Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); + true + } else { + false + }, + } + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![::sassafras_pre_digest(claim.0.clone())] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges<>::Transaction, B>, + (_, public): Self::Claim, + epoch_descriptor: Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>::Transaction>, + sp_consensus::Error, + > { + // Sign the pre-sealed hash of the block and then add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*self.keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = ::sassafras_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok(import_block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO-SASS-P2 + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO-SASS-P2 + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' + let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &block_proposal_slot_portion, + None, + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) + } +} + +async fn tickets_worker( + client: Arc, + keystore: SyncCryptoStorePtr, + epoch_changes: SharedEpochChanges, + select_chain: SC, +) where + B: BlockT, + C: BlockchainEvents + ProvideRuntimeApi, + C::Api: SassafrasApi, + SC: SelectChain + 'static, +{ + let mut notifications = client.import_notification_stream(); + while let Some(notification) = notifications.next().await { + let epoch_desc = match find_next_epoch_digest::(¬ification.header) { + Ok(Some(epoch_desc)) => epoch_desc, + Err(err) => { + warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); + continue + }, + _ => continue, + }; + + debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); + + let number = *notification.header.number(); + let position = if number == One::one() { + EpochIdentifierPosition::Genesis1 + } else { + EpochIdentifierPosition::Regular + }; + let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; + + let tickets = epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| authorship::generate_epoch_tickets(epoch, &keystore)) + .unwrap_or_default(); + + if tickets.is_empty() { + continue + } + + // Get the best block on which we will build and send the tickets. 
+ let best_id = match select_chain.best_chain().await { + Ok(header) => BlockId::Hash(header.hash()), + Err(err) => { + error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); + continue + }, + }; + + let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { + Err(err) => Some(err.to_string()), + Ok(false) => Some("Unknown reason".to_string()), + _ => None, + }; + if let Some(err) = err { + error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); + // Remove tickets from epoch tree node. + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|epoch| epoch.tickets_info.clear()); + } + } +} + +/// Worker for Sassafras which implements `Future`. This must be polled. +pub struct SassafrasWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: SlotNotificationSinks, +} + +impl SassafrasWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self, + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl Future for SassafrasWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.inner.as_mut().poll(cx) + } +} + +/// Slot notification sinks. +type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>>, +>; + +/// Parameters for Sassafras. +pub struct SassafrasParams { + /// The client to use + pub client: Arc, + /// The keystore that manages the keys of the node. + pub keystore: SyncCryptoStorePtr, + /// The chain selection strategy + pub select_chain: SC, + /// The environment we are producing blocks for. + pub env: EN, + /// The underlying block-import object to supply our produced blocks to. 
+ /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + /// The source of timestamps for relative slots + pub sassafras_link: SassafrasLink, + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, +} + +/// Start the Sassafras worker. +pub fn start_sassafras( + SassafrasParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + can_author_with, + }: SassafrasParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + PreCommitActions + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer>, + I: BlockImport> + + Send + + Sync + + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + CAW: CanAuthorWith + Send + Sync + 'static, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let slot_worker = SassafrasSlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, 
+ force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + genesis_config: sassafras_link.genesis_config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + sassafras_link.genesis_config.slot_duration(), + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), + sync_oracle, + create_inherent_data_providers, + can_author_with, + ); + + let tickets_worker = tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + ); + + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 59f53415a31d2..07f723341b069 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Schema for Sassafras epoch changes in the auxiliary db. +//! Schema for auxiliary data persistence. use scale_codec::{Decode, Encode}; diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs new file mode 100644 index 0000000000000..3630589aeb46a --- /dev/null +++ b/client/consensus/sassafras/src/block_import.rs @@ -0,0 +1,368 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to block import. + +use super::*; + +/// A block-import handler for Sassafras. +/// +/// This scans each imported block for epoch change announcements. The announcements are +/// tracked in a tree (of all forks), and the import logic validates all epoch change +/// transitions, i.e. whether a given epoch change is expected or whether it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. +pub struct SassafrasBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: SassafrasConfiguration, +} + +impl Clone for SassafrasBlockImport { + fn clone(&self) -> Self { + SassafrasBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + genesis_config: self.genesis_config.clone(), + } + } +} + +impl SassafrasBlockImport { + /// Constructor. 
+ pub fn new( + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: SassafrasConfiguration, + ) -> Self { + SassafrasBlockImport { inner, client, epoch_changes, genesis_config } + } +} + +#[async_trait::async_trait] +impl BlockImport for SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + + let pre_digest = find_pre_digest::(&block.header).expect( + "valid sassafras headers must contain a predigest; header has been already verified; qed", + ); + let slot = pre_digest.slot; + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( + "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ + header has already been verified; qed", + ); + + // Make sure that slot number is strictly increasing + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) + } + + // If there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + // Use an extra scope to make the compiler happy, because otherwise he complains about the + // mutex, even if we dropped it... 
+ let mut epoch_changes = { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + + let added_weight = pre_digest.ticket_info.is_some() as u32; + let total_weight = parent_weight + added_weight; + + // Search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + }) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "sassafras", + log_level, + "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "sassafras", + log_level, + "🌳 🍁 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| { + ConsensusError::ClientImport(format!( + "Error importing epoch changes: {}", + e + )) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + *epoch_changes = + old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + "No block weight for parent header.".to_string(), + ) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; + // Release the mutex, but it stays locked + epoch_changes.release_mutex() + }; + + let import_result = self.inner.import_block(block, new_cache).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + if import_result.is_err() { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes.upgrade() = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + if info.block_gap.is_none() { + epoch_changes.clear_gap(); + } + + let finalized_slot = { + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect( + "best finalized hash was given by client; finalized headers must exist in db; qed", + ); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .slot + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(()) +} + +/// Produce a Sassafras block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. 
+pub fn block_import( + genesis_config: SassafrasConfiguration, + inner_block_import: I, + client: Arc, +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> +where + C: AuxStore + HeaderBackend + HeaderMetadata + 'static, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; + + let link = SassafrasLink { + epoch_changes: epoch_changes.clone(), + genesis_config: genesis_config.clone(), + }; + + let block_import = + SassafrasBlockImport::new(inner_block_import, client, epoch_changes, genesis_config); + + Ok((block_import, link)) +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c83b84cb0ff37..d81b8788fbae9 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -52,7 +52,8 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, }, - import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue}, + Verifier, }; use sc_consensus_epochs::{ descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, @@ -70,8 +71,8 @@ use sp_consensus::{ BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; -use sp_consensus_slots::{Slot, SlotDuration}; -use sp_core::{crypto::ByteArray, ExecutionContext}; +use sp_consensus_slots::Slot; +use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ @@ -84,78 +85,22 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, + 
vrf::{make_slot_transcript, make_ticket_transcript}, AuthorityId, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, SassafrasGenesisConfiguration, Ticket, TicketInfo, VRFOutput, - VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, TicketInfo, VRFOutput, VRFProof, + SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; mod authorship; mod aux_schema; +mod block_import; mod verification; -/// Sassafras epoch information -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] -pub struct Epoch { - /// The epoch index. - pub epoch_index: u64, - /// The starting slot of the epoch. - pub start_slot: Slot, - /// The duration of this epoch in slots. - pub duration: u64, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], - /// Configuration of the epoch. - pub config: SassafrasEpochConfiguration, - /// Tickets metadata. - pub tickets_info: BTreeMap, -} - -impl EpochT for Epoch { - type NextEpochDescriptor = NextEpochDescriptor; - type Slot = Slot; +pub use authorship::{start_sassafras, SassafrasParams, SassafrasWorker}; +pub use block_import::{block_import, SassafrasBlockImport}; +pub use verification::SassafrasVerifier; - fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - // TODO-SASS-P2: allow config change on epoch change - config: self.config.clone(), - tickets_info: BTreeMap::new(), - } - } - - fn start_slot(&self) -> Slot { - self.start_slot - } - - fn end_slot(&self) -> Slot { - self.start_slot + self.duration - } -} - -impl Epoch { - /// Create the genesis epoch (epoch #0). 
This is defined to start at the slot of - /// the first block, so that has to be provided. - pub fn genesis(genesis_config: &SassafrasGenesisConfiguration, slot: Slot) -> Epoch { - Epoch { - epoch_index: 0, - start_slot: slot, - duration: genesis_config.epoch_length, - authorities: genesis_config.genesis_authorities.clone(), - randomness: genesis_config.randomness, - config: SassafrasEpochConfiguration {}, - tickets_info: BTreeMap::new(), - } - } -} - -/// Errors encountered by the Sassafras authorship task. -/// TODO-SASS-P2: remove unused errors. +/// Errors encountered by the Sassafras routines. #[derive(Debug, thiserror::Error)] pub enum Error { /// Multiple Sassafras pre-runtime digests @@ -167,12 +112,6 @@ pub enum Error { /// Multiple Sassafras epoch change digests #[error("Multiple Sassafras epoch change digests")] MultipleEpochChangeDigests, - // /// Multiple Sassafras config change digests - // #[error("Multiple Sassafras config change digests, rejecting!")] - // MultipleConfigChangeDigests, - // /// Could not extract timestamp and slot - // #[error("Could not extract timestamp and slot: {0}")] - // Extraction(sp_consensus::Error), /// Could not fetch epoch #[error("Could not fetch epoch at {0:?}")] FetchEpoch(B::Hash), @@ -197,12 +136,6 @@ pub enum Error { /// Bad signature #[error("Bad signature on {0:?}")] BadSignature(B::Hash), - // /// Invalid author: Expected secondary author - // #[error("Invalid author: Expected secondary author: {0:?}, got: {1:?}.")] - // InvalidAuthor(AuthorityId, AuthorityId), - // /// VRF verification of block by author failed - // #[error("VRF verification of block by author {0:?} failed: threshold {1} exceeded")] - // VRFVerificationOfBlockFailed(AuthorityId, u128), /// VRF verification failed #[error("VRF verification failed: {0:?}")] VRFVerificationFailed(SignatureError), @@ -215,9 +148,6 @@ pub enum Error { /// Expected epoch change to happen. 
#[error("Expected epoch change to happen at {0:?}, s{1}")] ExpectedEpochChange(B::Hash, Slot), - // /// Unexpected config change. - // #[error("Unexpected config change")] - // UnexpectedConfigChange, /// Unexpected epoch change #[error("Unexpected epoch change")] UnexpectedEpochChange, @@ -250,509 +180,107 @@ impl From> for String { } } +// Convenience function fn sassafras_err(error: Error) -> Error { error!(target: "sassafras", "🌳 {}", error); error } -/// Intermediate value passed to block importer. -pub struct SassafrasIntermediate { - /// The epoch descriptor. - pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +/// Sassafras epoch information +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// Epoch configuration + pub config: SassafrasConfiguration, + /// Tickets metadata. + pub tickets_info: BTreeMap, } -/// Intermediate key for Babe engine. -pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; - -/// Configuration for Sassafras used for defining block verification parameters as -/// well as authoring (e.g. the slot duration). -#[derive(Clone)] -pub struct Config { - genesis_config: SassafrasGenesisConfiguration, -} +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type Slot = Slot; -impl Config { - /// Read Sassafras genesis configuration from the runtime. - /// - /// TODO-SASS-P4: (FIXME) - /// This doesn't return the genesis configuration, but the Configuration at best block. - /// There is an open [PR](https://github.com/paritytech/substrate/pull/11760) for BABE, - /// we'll follow the same strategy once it is closed. 
- pub fn get(client: &C) -> ClientResult - where - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: SassafrasApi, - { - let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); - if client.usage_info().chain.finalized_state.is_none() { - debug!(target: "sassafras", "🌳 No finalized state is available. Reading config from genesis"); - best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + let config = SassafrasConfiguration { + slot_duration: self.config.slot_duration, + epoch_duration: self.config.epoch_duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + threshold_params: descriptor.config.unwrap_or(self.config.threshold_params.clone()), + }; + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + config.epoch_duration, + config, + tickets_info: BTreeMap::new(), } - - let genesis_config = client.runtime_api().configuration(&best_block_id)?; - - Ok(Config { genesis_config }) } - /// Get the genesis configuration. - pub fn genesis_config(&self) -> &SassafrasGenesisConfiguration { - &self.genesis_config + fn start_slot(&self) -> Slot { + self.start_slot } - /// Get the slot duration defined in the genesis configuration. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.genesis_config.slot_duration) + fn end_slot(&self) -> Slot { + self.start_slot + self.config.slot_duration } } -/// Parameters for Sassafras. -pub struct SassafrasParams { - /// The client to use - pub client: Arc, - /// The keystore that manages the keys of the node. - pub keystore: SyncCryptoStorePtr, - /// The chain selection strategy - pub select_chain: SC, - /// The environment we are producing blocks for. - pub env: EN, - /// The underlying block-import object to supply our produced blocks to. 
- /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise - /// critical consensus logic will be omitted. - pub block_import: I, - /// A sync oracle - pub sync_oracle: SO, - /// Hook into the sync module to control the justification sync process. - pub justification_sync_link: L, - /// Something that can create the inherent data providers. - pub create_inherent_data_providers: CIDP, - /// Force authoring of blocks even if we are offline - pub force_authoring: bool, - /// The source of timestamps for relative slots - pub sassafras_link: SassafrasLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, -} - -/// Start the Sassafras worker. -pub fn start_sassafras( - SassafrasParams { - client, - keystore, - select_chain, - env, - block_import, - sync_oracle, - justification_sync_link, - create_inherent_data_providers, - force_authoring, - sassafras_link, - can_author_with, - }: SassafrasParams, -) -> Result, sp_consensus::Error> -where - B: BlockT, - C: ProvideRuntimeApi - + ProvideUncles - + BlockchainEvents - + PreCommitActions - + HeaderBackend - + HeaderMetadata - + Send - + Sync - + 'static, - C::Api: SassafrasApi, - SC: SelectChain + 'static, - EN: Environment + Send + Sync + 'static, - EN::Proposer: Proposer>, - I: BlockImport> - + Send - + Sync - + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - L: sc_consensus::JustificationSyncLink + 'static, - CIDP: CreateInherentDataProviders + Send + Sync + 'static, - CIDP::InherentDataProviders: InherentDataProviderExt + Send, - CAW: CanAuthorWith + Send + Sync + 'static, - ER: std::error::Error + Send + From + From + 'static, -{ - info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); - - let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); - - let worker = SassafrasSlotWorker { - client: client.clone(), - block_import, - env, - sync_oracle: sync_oracle.clone(), - justification_sync_link, - 
force_authoring, - keystore: keystore.clone(), - epoch_changes: sassafras_link.epoch_changes.clone(), - slot_notification_sinks: slot_notification_sinks.clone(), - config: sassafras_link.config.clone(), - }; - - let slot_worker = sc_consensus_slots::start_slot_worker( - sassafras_link.config.slot_duration(), - select_chain.clone(), - sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), - sync_oracle, - create_inherent_data_providers, - can_author_with, - ); - - let ticket_worker = tickets_worker( - client.clone(), - keystore, - sassafras_link.epoch_changes.clone(), - select_chain, - ); - - let inner = future::select(Box::pin(slot_worker), Box::pin(ticket_worker)); - - Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) -} - -async fn tickets_worker( - client: Arc, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - select_chain: SC, -) where - B: BlockT, - C: BlockchainEvents + ProvideRuntimeApi, - C::Api: SassafrasApi, - SC: SelectChain + 'static, -{ - let mut notifications = client.import_notification_stream(); - while let Some(notification) = notifications.next().await { - let epoch_desc = match find_next_epoch_digest::(¬ification.header) { - Ok(Some(epoch_desc)) => epoch_desc, - Err(err) => { - warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); - continue - }, - _ => continue, - }; - - debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); - - let tickets = { - let mut epoch_changes = epoch_changes.shared_data(); - - let number = *notification.header.number(); - let position = if number == One::one() { - EpochIdentifierPosition::Genesis1 - } else { - EpochIdentifierPosition::Regular - }; - let mut epoch_identifier = - EpochIdentifier { position, hash: notification.hash, number }; - - let epoch = match epoch_changes.epoch_mut(&mut epoch_identifier) { - Some(epoch) => epoch, - None => { - warn!(target: "sassafras", "🌳 Unexpected missing epoch data for {}", 
notification.hash); - continue - }, - }; - - authorship::generate_epoch_tickets(epoch, 30, 1, &keystore) - }; - - if tickets.is_empty() { - continue - } - - // Get the best block on which we will build and send the tickets. - let best_id = match select_chain.best_chain().await { - Ok(header) => BlockId::Hash(header.hash()), - Err(err) => { - error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); - continue - }, - }; - - let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { - Err(err) => Some(err.to_string()), - Ok(false) => Some("Unknown reason".to_string()), - _ => None, - }; - if let Some(err) = err { - error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); - // TODO-SASS-P2: on error remove tickets from epoch... +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis(config: &SassafrasConfiguration, slot: Slot) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot, + config: config.clone(), + tickets_info: BTreeMap::new(), } } } -/// Worker for Sassafras which implements `Future`. This must be polled. -pub struct SassafrasWorker { - inner: Pin + Send + 'static>>, - slot_notification_sinks: SlotNotificationSinks, -} - -impl SassafrasWorker { - /// Return an event stream of notifications for when new slot happens, and the corresponding - /// epoch descriptor. - pub fn slot_notification_stream( - &self, - ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { - const CHANNEL_BUFFER_SIZE: usize = 1024; - - let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); - self.slot_notification_sinks.lock().push(sink); - stream - } -} - -impl Future for SassafrasWorker { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.inner.as_mut().poll(cx) - } -} - -/// Slot notification sinks. 
-type SlotNotificationSinks = Arc< - Mutex::Hash, NumberFor, Epoch>)>>>, ->; - -struct SassafrasSlotWorker { - client: Arc, - block_import: I, - env: E, - sync_oracle: SO, - justification_sync_link: L, - force_authoring: bool, - keystore: SyncCryptoStorePtr, - epoch_changes: SharedEpochChanges, - slot_notification_sinks: SlotNotificationSinks, - config: Config, -} - -#[async_trait::async_trait] -impl sc_consensus_slots::SimpleSlotWorker - for SassafrasSlotWorker +/// Read latest finalized protocol configuration. +pub fn configuration(client: &C) -> ClientResult where B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C: ProvideRuntimeApi + UsageProvider, C::Api: SassafrasApi, - E: Environment + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Clone + Sync, - L: sc_consensus::JustificationSyncLink, - ER: std::error::Error + Send + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; - type Claim = (PreDigest, AuthorityId); - type SyncOracle = SO; - type JustificationSyncLink = L; - type CreateProposer = - Pin> + Send + 'static>>; - type Proposer = E::Proposer; - type BlockImport = I; - - fn logging_target(&self) -> &'static str { - "sassafras" - } - - fn block_import(&mut self) -> &mut Self::BlockImport { - &mut self.block_import - } - - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { - self.epoch_changes - .shared_data() - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - *parent.number(), - slot, - ) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) - } - - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .map(|epoch| epoch.as_ref().authorities.len()) - } - - async fn claim_slot( - &self, - parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) -> Option { - debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); - - // Get the next slot ticket from the runtime. - let block_id = BlockId::Hash(parent_header.hash()); - let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; - - // TODO-SASS-P2 - debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); - - let claim = authorship::claim_slot( - slot, - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - })? - .as_ref(), - ticket, - &self.keystore, - ); - - if claim.is_some() { - debug!(target: "sassafras", "🌳 Claimed slot {}", slot); - } - claim - } - - fn notify_slot( - &self, - _parent_header: &B::Header, - slot: Slot, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } - }); - } - - fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { - vec![::sassafras_pre_digest(claim.0.clone())] - } - - async fn block_import_params( - &self, - header: B::Header, - header_hash: &B::Hash, - body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, - (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, - ) -> Result< - sc_consensus::BlockImportParams>::Transaction>, - 
sp_consensus::Error, - > { - // Sign the pre-sealed hash of the block and then add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - ::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), - ) - })?; - let signature: AuthoritySignature = signature - .clone() - .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = ::sassafras_seal(signature); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - - Ok(import_block) - } - - fn force_authoring(&self) -> bool { - self.force_authoring - } - - fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { - // TODO-SASS-P2 - false - } - - fn sync_oracle(&mut self) -> &mut Self::SyncOracle { - &mut self.sync_oracle - } - - fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { - &mut self.justification_sync_link - } - - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin( - self.env - .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), - ) - } - - fn telemetry(&self) -> Option { - // TODO-SASS-P2 - None - } - - fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); - - // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' - let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); + let info = client.usage_info().chain; + let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| { + debug!(target: "sassafras", "🌳 Reading config from genesis"); + info.genesis_hash + }); + + let config = client.runtime_api().configuration(&BlockId::Hash(hash))?; + Ok(config) +} - sc_consensus_slots::proposing_remaining_duration( - parent_slot, - slot_info, - &block_proposal_slot_portion, - None, - sc_consensus_slots::SlotLenienceType::Exponential, - self.logging_target(), - ) - } +/// Intermediate value passed to block importer from authoring or validation logic. +pub struct SassafrasIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; + /// Extract the Sassafras pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. 
-pub fn find_pre_digest(header: &B::Header) -> Result> { +fn find_pre_digest(header: &B::Header) -> Result> { // Genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { const PROOF: &str = "zero sequence is a valid vrf output/proof; qed"; - let block_vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); - let block_vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); + let vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); + let vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); return Ok(PreDigest { authority_index: 0, slot: 0.into(), - block_vrf_output, - block_vrf_proof, + vrf_output, + vrf_proof, ticket_info: None, }) } @@ -791,639 +319,17 @@ fn find_next_epoch_digest( /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct SassafrasLink { + /// Epoch changes tree epoch_changes: SharedEpochChanges, - config: Config, + /// Startup configuration. Read from runtime at last finalized block. + genesis_config: SassafrasConfiguration, } impl SassafrasLink { - /// Get the epoch changes of this link. - pub fn epoch_changes(&self) -> &SharedEpochChanges { - &self.epoch_changes - } - /// Get the config of this link. - pub fn config(&self) -> &Config { - &self.config - } -} - -/// A verifier for Sassafras blocks. 
-pub struct SassafrasVerifier { - client: Arc, - select_chain: SelectChain, - create_inherent_data_providers: CIDP, - config: Config, - epoch_changes: SharedEpochChanges, - can_author_with: CAW, - telemetry: Option, -} - -impl SassafrasVerifier -where - Block: BlockT, - Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith, - CIDP: CreateInherentDataProviders, -{ - async fn check_inherents( - &self, - block: Block, - block_id: BlockId, - inherent_data: InherentData, - create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, - ) -> Result<(), Error> { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "sassafras", - "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - - let inherent_res = self - .client - .runtime_api() - .check_inherents_with_context(&block_id, execution_context, block, inherent_data) - .map_err(Error::RuntimeApi)?; - - if !inherent_res.ok() { - for (i, e) in inherent_res.into_errors() { - match create_inherent_data_providers.try_handle_error(&i, &e).await { - Some(res) => res.map_err(|e| Error::CheckInherents(e))?, - None => return Err(Error::CheckInherentsUnhandled(i)), - } - } - } - - Ok(()) - } - - async fn check_and_report_equivocation( - &self, - slot_now: Slot, - slot: Slot, - header: &Block::Header, - author: &AuthorityId, - origin: &BlockOrigin, - ) -> Result<(), Error> { - // Don't report any equivocations during initial sync as they are most likely stale. - if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()) - } - - // Check if authorship of this header is an equivocation and return a proof if so. - let equivocation_proof = - match check_equivocation(&*self.client, slot_now, slot, header, author) - .map_err(Error::Client)? 
- { - Some(proof) => proof, - None => return Ok(()), - }; - - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - slot, - equivocation_proof.first_header.hash(), - equivocation_proof.second_header.hash(), - ); - - // Get the best block on which we will build and send the equivocation report. - let _best_id: BlockId = self - .select_chain - .best_chain() - .await - .map(|h| BlockId::Hash(h.hash())) - .map_err(|e| Error::Client(e.into()))?; - - // TODO-SASS-P2 - - Ok(()) - } -} - -type BlockVerificationResult = - Result<(BlockImportParams, Option)>>), String>; - -#[async_trait::async_trait] -impl Verifier - for SassafrasVerifier -where - Block: BlockT, - Client: HeaderMetadata - + HeaderBackend - + ProvideRuntimeApi - + Send - + Sync - + AuxStore, - Client::Api: BlockBuilderApi + SassafrasApi, - SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith + Send + Sync, - CIDP: CreateInherentDataProviders + Send + Sync, - CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, -{ - async fn verify( - &mut self, - mut block: BlockImportParams, - ) -> BlockVerificationResult { - trace!( - target: "sassafras", - "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", - block.origin, - block.header, - block.justifications, - block.body, - ); - - if block.with_state() { - // When importing whole state we don't calculate epoch descriptor, but rather - // read it from the state after import. We also skip all verifications - // because there's no parent state and we trust the sync module to verify - // that the state is correct and finalized. 
- return Ok((block, Default::default())) - } - - trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); - - let hash = block.header.hash(); - let parent_hash = *block.header.parent_hash(); - - let create_inherent_data_providers = self - .create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await - .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; - - let slot_now = create_inherent_data_providers.slot(); - - let parent_header_metadata = self - .client - .header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; - - let pre_digest = find_pre_digest::(&block.header)?; - - let (check_header, epoch_descriptor) = { - let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot, - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? 
- .ok_or(Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or(Error::::FetchEpoch(parent_hash))?; - - let ticket = self - .client - .runtime_api() - .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) - .map_err(|err| err.to_string())?; - - let v_params = verification::VerificationParams { - header: block.header.clone(), - pre_digest, - slot_now, - epoch: viable_epoch.as_ref(), - ticket, - }; - - (verification::check_header::(v_params)?, epoch_descriptor) - }; - - match check_header { - CheckedHeader::Checked(pre_header, verified_info) => { - let sassafras_pre_digest = verified_info - .pre_digest - .as_sassafras_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - let slot = sassafras_pre_digest.slot; - - // The header is valid but let's check if there was something else already - // proposed at the same slot by the given author. If there was, we will - // report the equivocation to the runtime. - if let Err(err) = self - .check_and_report_equivocation( - slot_now, - slot, - &block.header, - &verified_info.author, - &block.origin, - ) - .await - { - warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); - } - - // If the body is passed through, we need to use the runtime to check that the - // internally-set timestamp in the inherents actually matches the slot set in the - // seal. 
- if let Some(inner_body) = block.body { - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(slot); - let new_block = Block::new(pre_header.clone(), inner_body); - - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; - - let (_, inner_body) = new_block.deconstruct(); - block.body = Some(inner_body); - } - - trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); - telemetry!( - self.telemetry; - CONSENSUS_TRACE; - "sassafras.checked_and_importing"; - "pre_header" => ?pre_header, - ); - - block.header = pre_header; - block.post_digests.push(verified_info.seal); - block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); - block.post_hash = Some(hash); - - Ok((block, Default::default())) - }, - CheckedHeader::Deferred(a, b) => { - debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!( - self.telemetry; - CONSENSUS_DEBUG; - "sassafras.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(Error::::TooFarInFuture(hash).into()) - }, - } - } -} - -/// A block-import handler for Sassafras. -/// -/// This scans each imported block for epoch change announcements. The announcements are -/// tracked in a tree (of all forks), and the import logic validates all epoch change -/// transitions, i.e. whether a given epoch change is expected or whether it is missing. -/// -/// The epoch change tree should be pruned as blocks are finalized. 
-pub struct SassafrasBlockImport { - inner: I, - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, -} - -impl Clone for SassafrasBlockImport { - fn clone(&self) -> Self { - SassafrasBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - epoch_changes: self.epoch_changes.clone(), - config: self.config.clone(), - } - } -} - -impl SassafrasBlockImport { - fn new( - client: Arc, - epoch_changes: SharedEpochChanges, - block_import: I, - config: Config, - ) -> Self { - SassafrasBlockImport { client, inner: block_import, epoch_changes, config } - } -} - -#[async_trait::async_trait] -impl BlockImport for SassafrasBlockImport -where - Block: BlockT, - Inner: BlockImport> + Send + Sync, - Inner::Error: Into, - Client: HeaderBackend - + HeaderMetadata - + AuxStore - + ProvideRuntimeApi - + Send - + Sync, - Client::Api: SassafrasApi + ApiExt, -{ - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - async fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let number = *block.header.number(); - - let pre_digest = find_pre_digest::(&block.header).expect( - "valid sassafras headers must contain a predigest; header has been already verified; qed", - ); - let slot = pre_digest.slot; - - let parent_hash = *block.header.parent_hash(); - let parent_header = self - .client - .header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| { - ConsensusError::ChainLookup( - sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), - ) - })?; - - let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( - "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ - header has already been verified; qed", - ); - - // Make sure that slot number is strictly increasing - if slot <= parent_slot { - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), - )) - } - - // If there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; - - // Use an extra scope to make the compiler happy, because otherwise he complains about the - // mutex, even if we dropped it... - let mut epoch_changes = { - let mut epoch_changes = self.epoch_changes.shared_data_locked(); - - // Check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ClientImport( - sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) - .into(), - ) - })? - }; - - let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - - let added_weight = pre_digest.ticket_info.is_some() as u32; - let total_weight = parent_weight + added_weight; - - // Search for this all the time so we can reject unexpected announcements. 
- let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some()) { - (true, false) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )), - (false, true) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::UnexpectedEpochChange).into(), - )), - _ => (), - } - - let info = self.client.info(); - - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some((*epoch_changes).clone()); - - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) - .ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; - - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; - - log!(target: "sassafras", - log_level, - "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); - - let next_epoch = viable_epoch.increment(next_epoch_descriptor); - - log!(target: "sassafras", - log_level, - "🌳 🍁 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); - - // Prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. 
- let prune_and_import = || { - prune_finalized(self.client.clone(), &mut epoch_changes)?; - - epoch_changes - .import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ) - .map_err(|e| { - ConsensusError::ClientImport(format!( - "Error importing epoch changes: {}", - e - )) - })?; - - Ok(()) - }; - - if let Err(e) = prune_and_import() { - debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); - *epoch_changes = - old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e) - } - - aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { - block - .auxiliary - .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - } - - aux_schema::write_block_weight(hash, total_weight, |values| { - block - .auxiliary - .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ChainLookup( - "No block weight for parent header.".to_string(), - ) - })? 
- }; - - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) - }; - // Release the mutex, but it stays locked - epoch_changes.release_mutex() - }; - - let import_result = self.inner.import_block(block, new_cache).await; - - // Revert to the original epoch changes in case there's an error - // importing the block - if import_result.is_err() { - if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes.upgrade() = old_epoch_changes; - } - } - - import_result.map_err(Into::into) - } - - async fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).await.map_err(Into::into) - } -} - -/// Gets the best finalized block and its slot, and prunes the given epoch tree. -fn prune_finalized( - client: Arc, - epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> -where - B: BlockT, - C: HeaderBackend + HeaderMetadata, -{ - let info = client.info(); - if info.block_gap.is_none() { - epoch_changes.clear_gap(); + pub fn genesis_config(&self) -> &SassafrasConfiguration { + &self.genesis_config } - - let finalized_slot = { - let finalized_header = client - .header(BlockId::Hash(info.finalized_hash)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect( - "best finalized hash was given by client; finalized headers must exist in db; qed", - ); - - find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; valid blocks have a pre-digest; qed") - .slot - }; - - epoch_changes - .prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - Ok(()) -} - -/// Produce a Sassafras block-import object to be used later on in the construction of -/// an import-queue. 
-/// -/// Also returns a link object used to correctly instantiate the import queue -/// and background worker. -pub fn block_import( - config: Config, - wrapped_block_import: I, - client: Arc, -) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> -where - C: AuxStore + HeaderBackend + HeaderMetadata + 'static, -{ - let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; - - let link = SassafrasLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; - - // NOTE: this isn't entirely necessary, but since we didn't use to prune the - // epoch tree it is useful as a migration, so that nodes prune long trees on - // startup rather than waiting until importing the next epoch change block. - prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; - - let import = SassafrasBlockImport::new(client, epoch_changes, wrapped_block_import, config); - - Ok((import, link)) } /// Start an import queue for the Sassafras consensus algorithm. @@ -1434,9 +340,9 @@ where /// /// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, /// otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( sassafras_link: SassafrasLink, - block_import: Inner, + block_import: BI, justification_import: Option>, client: Arc, select_chain: SelectChain, @@ -1447,13 +353,6 @@ pub fn import_queue( telemetry: Option, ) -> ClientResult> where - Inner: BlockImport< - Block, - Error = ConsensusError, - Transaction = sp_api::TransactionFor, - > + Send - + Sync - + 'static, Client: ProvideRuntimeApi + HeaderBackend + HeaderMetadata @@ -1462,20 +361,27 @@ where + Sync + 'static, Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, + BI: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = SassafrasVerifier { + let verifier = SassafrasVerifier::new( + client, select_chain, create_inherent_data_providers, - config: sassafras_link.config, - epoch_changes: sassafras_link.epoch_changes, + sassafras_link.epoch_changes, can_author_with, telemetry, - client, - }; + sassafras_link.genesis_config, + ); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 3c4dbef92f01a..b162fe390ef03 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -16,17 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Verification for Sassafras headers. 
- -use super::{authorship, sassafras_err, BlockT, Epoch, Error}; -use sc_consensus_slots::CheckedHeader; -use sp_consensus_sassafras::{ - digests::{CompatibleDigestItem, PreDigest}, - make_slot_transcript, make_ticket_transcript, AuthorityId, AuthorityPair, Ticket, -}; -use sp_consensus_slots::Slot; -use sp_core::{ByteArray, Pair}; -use sp_runtime::{traits::Header, DigestItem}; +//! Types and functions related to block verification. + +use super::*; // Allowed slot drift. const MAX_SLOT_DRIFT: u64 = 1; @@ -64,13 +56,14 @@ pub fn check_header( params: VerificationParams, ) -> Result, Error> { let VerificationParams { mut header, pre_digest, slot_now, epoch, ticket } = params; + let config = &epoch.config; // Check that the slot is not in the future, with some drift being allowed. if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let author = match epoch.authorities.get(pre_digest.authority_index as usize) { + let author = match config.authorities.get(pre_digest.authority_index as usize) { Some(author) => author.0.clone(), None => return Err(sassafras_err(Error::SlotAuthorNotFound)), }; @@ -100,18 +93,15 @@ pub fn check_header( // TODO-SASS-P2 ... 
we can eventually remove auth index from ticket info log::error!(target: "sassafras", "🌳 Wrong primary authority index"); } - let transcript = make_ticket_transcript( - &epoch.randomness, - ticket_info.attempt as u64, - epoch.epoch_index, - ); + let transcript = + make_ticket_transcript(&config.randomness, ticket_info.attempt, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { log::debug!(target: "sassafras", "🌳 checking secondary"); - let idx = authorship::secondary_authority_index(pre_digest.slot, params.epoch); + let idx = authorship::secondary_authority_index(pre_digest.slot, config); if idx != pre_digest.authority_index as u64 { log::error!(target: "sassafras", "🌳 Wrong secondary authority index"); } @@ -128,13 +118,11 @@ pub fn check_header( }, } - // Check block-vrf proof + // Check slot-vrf proof - let transcript = make_slot_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_index); schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| { - p.vrf_verify(transcript, &pre_digest.block_vrf_output, &pre_digest.block_vrf_proof) - }) + .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; let info = VerifiedHeaderInfo { @@ -145,3 +133,300 @@ pub fn check_header( Ok(CheckedHeader::Checked(header, info)) } + +/// A verifier for Sassafras blocks. +pub struct SassafrasVerifier { + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, +} + +impl + SassafrasVerifier +{ + /// Constructor. 
+ pub fn new( + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + can_author_with: CAW, + telemetry: Option, + genesis_config: SassafrasConfiguration, + ) -> Self { + SassafrasVerifier { + client, + select_chain, + create_inherent_data_providers, + epoch_changes, + can_author_with, + telemetry, + genesis_config, + } + } +} + +impl SassafrasVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, +{ + async fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "sassafras", + "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(Error::RuntimeApi)?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // Don't report any equivocations during initial sync as they are most likely stale. 
+ if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // Check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // Get the best block on which we will build and send the equivocation report. + let _best_id: BlockId = self + .select_chain + .best_chain() + .await + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // TODO-SASS-P2 + + Ok(()) + } +} + +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; + +#[async_trait::async_trait] +impl Verifier + for SassafrasVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> BlockVerificationResult { + trace!( + target: "sassafras", + "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, + ); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } + + trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); + + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&block.header)?; + + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or(Error::::FetchEpoch(parent_hash))?; + + let ticket = self + .client + .runtime_api() + .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) + .map_err(|err| err.to_string())?; + + let v_params = VerificationParams { + header: block.header.clone(), + pre_digest, + slot_now, + epoch: viable_epoch.as_ref(), + ticket, + }; + + (check_header::(v_params)?, epoch_descriptor) + }; + + match check_header { + CheckedHeader::Checked(pre_header, verified_info) => { + let sassafras_pre_digest = verified_info + .pre_digest + .as_sassafras_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + let slot = sassafras_pre_digest.slot; + + // The header is valid but let's check if there was something else already + // proposed at the same slot by the given author. 
If there was, we will + // report the equivocation to the runtime. + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &block.header, + &verified_info.author, + &block.origin, + ) + .await + { + warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. + if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(slot); + let new_block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + ); + block.post_hash = Some(hash); + + Ok((block, Default::default())) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 1d3839a9dcfb9..fc0c1940cc50d 100644 --- a/frame/sassafras/Cargo.toml +++ 
b/frame/sassafras/Cargo.toml @@ -23,7 +23,6 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -31,6 +30,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +hex-literal = "0.3" [features] default = ["std"] @@ -45,7 +45,6 @@ std = [ "scale-info/std", "sp-application-crypto/std", "sp-consensus-sassafras/std", - "sp-consensus-vrf/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs new file mode 100644 index 0000000000000..2f1818e5b52cd --- /dev/null +++ b/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. + +use super::*; +use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_io::hashing; + +fn make_dummy_ticket(i: usize) -> Ticket { + let buf = i.to_le_bytes(); + hashing::twox_256(&buf).try_into().unwrap() +} + +benchmarks! { + submit_tickets { + let x in 0 .. 100; + + // Almost fill the available tickets space. + + let max_tickets: u32 = ::MaxTickets::get() - 10; + let tickets: Vec = (0..max_tickets as usize).into_iter().map(|i| { + make_dummy_ticket(i) + }).collect(); + let _ = Pallet::::submit_tickets(RawOrigin::None.into(), tickets); + + // Create the tickets to submit during the benchmark + + let tickets: Vec = (0..x as usize).into_iter().map(|i| { + make_dummy_ticket(i + max_tickets as usize) + }).collect(); + }: _(RawOrigin::None, tickets) + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 06155ec86877d..31678a6199ec7 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -1,4 +1,4 @@ -// Sassafras This file is part of Substrate. +// This file is part of Substrate. // Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Consensus extension module for Sassafras consensus. +//! Extension module for Sassafras consensus. //! //! 
Sassafras is a constant-time block production protocol that aims to ensure that //! there is exactly one block produced with constant time intervals rather multiple @@ -47,12 +47,16 @@ #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use scale_codec::{Decode, Encode}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; -use frame_support::{traits::Get, weights::Weight, BoundedBTreeSet, BoundedVec, WeakBoundedVec}; +use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; -use sp_application_crypto::ByteArray; -use sp_consensus_vrf::schnorrkel; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, + AuthorityId, Randomness, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, + SASSAFRAS_ENGINE_ID, +}; use sp_runtime::{ generic::DigestItem, traits::{One, Saturating}, @@ -60,53 +64,27 @@ use sp_runtime::{ }; use sp_std::prelude::Vec; -pub use sp_consensus_sassafras::{ - digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, - PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, -}; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] +mod mock; +#[cfg(all(feature = "std", test))] +mod tests; -// TODO-SASS-P2: tests and benches - -//#[cfg(test)] -//mod mock; -// -//#[cfg(test)] -//mod tests; -// -//#[cfg(feature = "runtime-benchmarks")] -//mod benchmarking; +pub mod session; pub use pallet::*; -/// Trigger an epoch change, if any should take place. -pub trait EpochChangeTrigger { - /// Trigger an epoch change, if any should take place. This should be called - /// during every block, after initialization is done. 
- fn trigger(now: T::BlockNumber); -} - -/// A type signifying to Sassafras that an external trigger for epoch changes -/// (e.g. pallet-session) is used. -pub struct ExternalTrigger; - -impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. -} - -/// A type signifying to Sassafras that it should perform epoch changes with an internal -/// trigger, recycling the same authorities forever. -pub struct SameAuthoritiesForever; - -impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); - let next_authorities = authorities.clone(); - - >::enact_epoch_change(authorities, next_authorities); - } - } +/// Tickets related metadata that is commonly used together. +#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] +pub struct TicketsMetadata { + /// Number of tickets available for even and odd session indices respectivelly. + /// I.e. the index is computed as session-index modulo 2. + pub tickets_count: [u32; 2], + /// Number of tickets segments + pub segments_count: u32, + /// Last segment has been already sorted + pub sort_started: bool, } #[frame_support::pallet] @@ -152,10 +130,6 @@ pub mod pallet { /// Max number of tickets that are considered for each epoch. #[pallet::constant] type MaxTickets: Get; - - /// Max number of tickets that we are going to consider for each epoch. - #[pallet::constant] - type MaxSubmittedTickets: Get; } // TODO-SASS-P2 @@ -212,37 +186,61 @@ pub mod pallet { /// adversary, for purposes such as public-coin zero-knowledge proofs. #[pallet::storage] #[pallet::getter(fn randomness)] - pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; /// Next epoch randomness. 
#[pallet::storage]
 - pub type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; - /// Current epoch randomness accumulator. + /// Randomness accumulator. #[pallet::storage] - pub type RandomnessAccumulator = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + pub type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. #[pallet::storage] #[pallet::getter(fn initialized)] - pub type Initialized = StorageValue<_, Option>; + pub type Initialized = StorageValue<_, PreDigest>; + + /// The configuration for the current epoch. + #[pallet::storage] + #[pallet::getter(fn config)] + pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>; - /// The configuration for the current epoch. Should never be `None` as it is initialized in - /// genesis. + /// The configuration for the next epoch. + #[pallet::storage] + pub type NextEpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// TODO-SASS-P2: better doc? Double check if next epoch tickets were computed using NextEpoch + /// params in the native code. + /// In other words a config change submitted during session N will be enacted on session N+2. + /// This is to maintain coherence for already submitted tickets for epoch N+1 that were + /// computed using configuration parameters stored for session N+1. #[pallet::storage] - pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + pub(super) type PendingEpochConfigChange = StorageValue<_, SassafrasEpochConfiguration>; - /// Current session tickets. + /// Stored tickets metadata. 
#[pallet::storage] - pub type Tickets = StorageValue<_, BoundedVec, ValueQuery>; + pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + + /// Tickets to be used for current and next session. + /// The key consists of a + /// - `u8` equal to session-index mod 2 + /// - `u32` equal to the slot-index. + #[pallet::storage] + pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>; + + // /// Next session tickets temporary accumulator length. + // #[pallet::storage] + // pub type NextTicketsSegmentsCount = StorageValue<_, u32, ValueQuery>; - /// Next session tickets. - // TODO-SASS-P2: probably the best thing is to store the tickets in a map - // Each map entry contains a vector of tickets as they are received. + /// Next session tickets temporary accumulator. + /// Special u32::MAX key is reserved for partially sorted segment. #[pallet::storage] - pub type NextTickets = - StorageValue<_, BoundedBTreeSet, ValueQuery>; + pub type NextTicketsSegments = + StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; /// Genesis configuration for Sassafras protocol. #[cfg_attr(feature = "std", derive(Default))] @@ -251,16 +249,14 @@ pub mod pallet { /// Genesis authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Genesis epoch configuration. - pub epoch_config: Option, + pub epoch_config: SassafrasEpochConfiguration, } #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); - EpochConfig::::put( - self.epoch_config.clone().expect("epoch_config must not be None"), - ); + EpochConfig::::put(self.epoch_config.clone()); } } @@ -277,88 +273,50 @@ pub mod pallet { // At the end of the block, we can safely include the new VRF output from // this block into the randomness accumulator. 
If we've determined // that this block was the first in a new epoch, the changeover logic has - // already occurred at this point, so the under-construction randomness - // will only contain outputs from the right epoch. - // TODO-SASS-P2: maybe here we can `expect` that is initialized (panic if not) - if let Some(pre_digest) = Initialized::::take().flatten() { - let authority_index = pre_digest.authority_index; - - let randomness: Option = Authorities::::get() - .get(authority_index as usize) - .and_then(|(authority, _)| { - schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok() - }) - .and_then(|pubkey| { - let current_slot = CurrentSlot::::get(); - - let transcript = sp_consensus_sassafras::make_slot_transcript( - &Self::randomness(), - current_slot, - EpochIndex::::get(), - ); - - let vrf_output = pre_digest.block_vrf_output; - - // This has already been verified by the client on block import. - debug_assert!(pubkey - .vrf_verify( - transcript.clone(), - &vrf_output, - &pre_digest.block_vrf_proof - ) - .is_ok()); - - vrf_output.0.attach_input_hash(&pubkey, transcript).ok() - }) - .map(|inout| { - inout.make_bytes(sp_consensus_sassafras::SASSAFRAS_BLOCK_VRF_PREFIX) - }); - - // TODO-SASS-P2: this should be infallible. Randomness should be always deposited. - // Eventually better to panic here? - if let Some(randomness) = randomness { - Self::deposit_randomness(&randomness); - } - } + // already occurred at this point, so the + let pre_digest = Initialized::::take() + .expect("Finalization is called after initialization; qed."); + Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); } } #[pallet::call] impl Pallet { /// Submit next epoch tickets. + /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? 
#[pallet::weight(10_000)] - pub fn submit_tickets(origin: OriginFor, tickets: Vec) -> DispatchResult { + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec, + ) -> DispatchResult { ensure_none(origin)?; + let mut metadata = TicketsMeta::::get(); + log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); - // We have to traverse the tickets list one by one to verify the SNARK proofs. - let mut next_tickets = NextTickets::::get(); - - // 1. validate proof - // 2. append to sorted list - // TODO-SASS-P2: use a scattered structure for tickets - next_tickets = next_tickets.try_mutate(|tree| { - for ticket in tickets.iter() { - tree.insert(*ticket); - } - let max_tickets = T::MaxTickets::get() as usize; - if tree.len() > max_tickets { - // Remove the mid values - // TODO-SASS-P2: with the new structure this will be reimplemented... - let diff = tree.len() - max_tickets; - let off = max_tickets / 2; - let val = tree.iter().nth(off).cloned().unwrap(); - let mut mid = tree.split_off(&val); - let val = mid.iter().nth(diff).cloned().unwrap(); - let mut tail = mid.split_off(&val); - tree.append(&mut tail); - log::warn!(target: "sassafras", "🌳 TICKETS OVERFLOW, drop {} tickets... (len = {})", diff, tree.len()); - } - }).expect("Tickets list len is within the allowed bounds; qed."); - - NextTickets::::put(next_tickets); + // We just require a unique key to save the partial tickets list. + metadata.segments_count += 1; + NextTicketsSegments::::insert(metadata.segments_count, tickets); + TicketsMeta::::set(metadata); + Ok(()) + } + /// Plan an epoch config change. The epoch config change is recorded and will be enacted on + /// the next call to `enact_session_change`. The config will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. 
+ #[pallet::weight(10_000)] + pub fn plan_config_change( + origin: OriginFor, + config: SassafrasEpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); Ok(()) } } @@ -384,6 +342,10 @@ pub mod pallet { // submit our tickets if we don't have enough authoring slots. // If we have 0 slots => we have zero chances. // Maybe this is one valid reason to introduce proxies. + // In short the question is >>> WHO HAS THE RIGHT TO SUBMIT A TICKET? <<< + // A) The current epoch validators + // B) The next epoch validators + // C) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) log::warn!( target: "sassafras::runtime", "🌳 Rejecting unsigned transaction from external sources.", @@ -392,7 +354,9 @@ pub mod pallet { } // Current slot should be less than half of epoch duration. - if Self::current_slot_epoch_index() >= T::EpochDuration::get() / 2 { + let epoch_duration = T::EpochDuration::get(); + + if Self::current_slot_epoch_index() >= epoch_duration / 2 { log::warn!( target: "sassafras::runtime", "🌳 Timeout to propose tickets, bailing out.", @@ -400,10 +364,27 @@ pub mod pallet { return InvalidTransaction::Stale.into() } - // TODO-SASS-P2 more validation steps: - // 1. epoch index - // 2. signed by an authority for current epoch - // 3. single submission attempt from validator? + // Check tickets are below threshold + + let next_auth = NextAuthorities::::get(); + let epoch_config = EpochConfig::::get(); + let threshold = sp_consensus_sassafras::compute_threshold( + epoch_config.redundancy_factor, + epoch_duration as u32, + epoch_config.attempts_number, + next_auth.len() as u32, + ); + + // TODO-SASS-P2: if we move this in the `submit_tickets` call then we can + // can drop only the invalid tickets. 
+ // In this way we don't penalize validators that submit tickets together + // with faulty validators. + if !tickets + .iter() + .all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold)) + { + return InvalidTransaction::Custom(0).into() + } ValidTransaction::with_tag_prefix("Sassafras") // We assign the maximum priority for any equivocation report. @@ -411,8 +392,8 @@ pub mod pallet { // TODO-SASS-P2: if possible use a more efficient way to distinquish // duplicates... .and_provides(tickets) - // TODO-SASS-P2: this should be set such that it is discarded after the first - // half + // TODO-SASS-P2: this should be set such that it is discarded after the + // first half .longevity(3_u64) .propagate(true) .build() @@ -435,7 +416,7 @@ impl Pallet { /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. - pub fn should_epoch_change(now: T::BlockNumber) -> bool { + pub fn should_end_session(now: T::BlockNumber) -> bool { // The epoch has technically ended during the passage of time between this block and the // last, but we have to "end" the epoch now, since there is no earlier possible block we // could have done it. @@ -443,6 +424,11 @@ impl Pallet { // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having // started at the slot of block 1. We want to use the same randomness and validator set as // signalled in the genesis, so we don't rotate the epoch. + + // TODO-SASS-P2 + // Is now != One required??? + // What if we want epochs with len = 1. In this case we don't change epoch correctly + // in slot 1. now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() } @@ -451,100 +437,106 @@ impl Pallet { } fn slot_epoch_index(slot: Slot) -> u64 { - if *GenesisSlot::::get() == 0 { - return 0 - } + // TODO-SASS-P2 : is this required? 
+ // if *GenesisSlot::::get() == 0 { + // return 0 + // } *slot.saturating_sub(Self::current_epoch_start()) } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_session` /// has returned `true`, and the caller is the only caller of this function. /// /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. - pub fn enact_epoch_change( + /// + /// TODO-SASS-P3: + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. + /// Should the tickets be invalidated? Currently they are... see the `get-ticket` method. + pub(crate) fn enact_session_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< (AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities, >, ) { - // TODO-SASS-P2: we don't depend on session module... - - // PRECONDITION: caller has done initialization and is guaranteed by the session module to - // be called before this. + // PRECONDITION: caller has done initialization. + // If using the internal trigger or the session pallet then this is guaranteed. 
debug_assert!(Self::initialized().is_some()); + // Update authorities + Authorities::::put(authorities); + NextAuthorities::::put(&next_authorities); + // Update epoch index - let epoch_index = EpochIndex::::get() + let mut epoch_idx = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - EpochIndex::::put(epoch_index); - // Update authorities - Authorities::::put(authorities); - NextAuthorities::::put(&next_authorities); + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); + if slot_idx >= T::EpochDuration::get() { + // Detected one or more skipped epochs, kill tickets and recompute the `epoch_index`. + TicketsMeta::::kill(); + // TODO-SASS-P2: adjust epoch index (TEST ME) + let idx: u64 = slot_idx.into(); + epoch_idx += idx / T::EpochDuration::get(); + } + EpochIndex::::put(epoch_idx); - // Update epoch randomness. - let next_epoch_index = epoch_index + let next_epoch_index = epoch_idx .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - // Returns randomness for the current epoch and computes the *next* - // epoch randomness. - let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::::put(randomness); + // Updates current epoch randomness and computes the *next* epoch randomness. + let next_randomness = Self::update_randomness(next_epoch_index); - // // Update the start blocks of the previous and new current epoch. 
- // >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { - // *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); - // *current_epoch_start_block = >::block_number(); - // }); + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config.clone() { + NextEpochConfig::::put(next_config); + } // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. - - let next_randomness = NextRandomness::::get(); - let next_epoch = NextEpochDescriptor { authorities: next_authorities.to_vec(), randomness: next_randomness, + config: next_config, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - // if let Some(next_config) = NextEpochConfig::::get() { - // EpochConfig::::put(next_config); - // } - - // if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { - // let next_epoch_config: BabeEpochConfiguration = - // pending_epoch_config_change.clone().into(); - // NextEpochConfig::::put(next_epoch_config); - // Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); - // } - - Self::enact_tickets(); + let epoch_key = (epoch_idx & 1) as u8; + let mut tickets_metadata = TicketsMeta::::get(); + // Optionally finish sorting + if tickets_metadata.segments_count != 0 { + Self::sort_tickets(u32::MAX, epoch_key, &mut tickets_metadata); + } + // Clear the prev (equal to the next) epoch tickets counter. + let next_epoch_key = epoch_key ^ 1; + tickets_metadata.tickets_count[next_epoch_key as usize] = 0; + TicketsMeta::::set(tickets_metadata); } - /// Enact next epoch tickets list. - /// To work properly this should be done as the last action of the last epoch slot. - /// (i.e. 
current tickets list is not used at this point) - fn enact_tickets() { - // TODO-SASS-P2: manage skipped epoch by killing both Tickets and NextTickets + /// Call this function on epoch change to update the randomness. + /// Returns the next epoch randomness. + fn update_randomness(next_epoch_index: u64) -> Randomness { + let curr_randomness = NextRandomness::::get(); + CurrentRandomness::::put(curr_randomness); - let mut tickets = NextTickets::::get().into_iter().collect::>(); - log::debug!(target: "sassafras", "🌳 @@@@@@@@@ Enacting {} tickets", tickets.len()); + let accumulator = RandomnessAccumulator::::get(); + let mut s = Vec::with_capacity(2 * curr_randomness.len() + 8); + s.extend_from_slice(&curr_randomness); + s.extend_from_slice(&next_epoch_index.to_le_bytes()); + s.extend_from_slice(&accumulator); - if tickets.len() > T::MaxTickets::get() as usize { - log::error!(target: "sassafras", "🌳 should never happen..."); - let max = T::MaxTickets::get() as usize; - tickets.truncate(max); - } - let tickets = BoundedVec::::try_from(tickets) - .expect("vector has been eventually truncated; qed"); + let next_randomness = sp_io::hashing::blake2_256(&s); + NextRandomness::::put(&next_randomness); - Tickets::::put(tickets); - NextTickets::::kill(); + next_randomness } /// Finds the start slot of the current epoch. Only guaranteed to give correct results after @@ -567,7 +559,7 @@ impl Pallet { >::deposit_log(log) } - fn deposit_randomness(randomness: &schnorrkel::Randomness) { + fn deposit_randomness(randomness: &Randomness) { let mut s = RandomnessAccumulator::::get().to_vec(); s.extend_from_slice(randomness); let accumulator = sp_io::hashing::blake2_256(&s); @@ -575,36 +567,43 @@ impl Pallet { } // Initialize authorities on genesis phase. 
- // TODO-SASS-P2: temporary fix to make the compiler happy - #[allow(dead_code)] fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { - if !authorities.is_empty() { - assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); - let bounded_authorities = - WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) - .expect("Initial number of authorities should be lower than T::MaxAuthorities"); - Authorities::::put(&bounded_authorities); - NextAuthorities::::put(&bounded_authorities); + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // If this function has already been called with some authorities, then the new list + // should be match the previously set one. + let prev_authorities = Authorities::::get(); + if !prev_authorities.is_empty() { + if prev_authorities.to_vec() == authorities { + return + } else { + panic!("Authorities already were already initialized"); + } } + + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); } fn initialize_genesis_epoch(genesis_slot: Slot) { GenesisSlot::::put(genesis_slot); - debug_assert_ne!(*GenesisSlot::::get(), 0); - // Deposit a log because this is the first block in epoch #0. We use the same values - // as genesis because we haven't collected any randomness yet. + // Deposit a log because this is the first block in epoch #0. + // We use the same values as genesis because we haven't collected any randomness yet. let next = NextEpochDescriptor { authorities: Self::authorities().to_vec(), randomness: Self::randomness(), + config: None, }; - Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } fn initialize(now: T::BlockNumber) { // Since `initialize` can be called twice (e.g. 
if session module is present) - // let's ensure that we only do the initialization once per block + // let's ensure that we only do the initialization once per block. + // TODO-SASS-P2: why session calls initialize? if Self::initialized().is_some() { return } @@ -622,89 +621,186 @@ impl Pallet { }) .next(); - // TODO-SASS-P2: maybe here we have to assert! the presence of pre_digest... - // Every valid sassafras block should come with a pre-digest - - if let Some(ref pre_digest) = pre_digest { - // The slot number of the current block being initialized - let current_slot = pre_digest.slot; - - // On the first non-zero block (i.e. block #1) this is where the first epoch - // (epoch #0) actually starts. We need to adjust internal storage accordingly. - if *GenesisSlot::::get() == 0 { - Self::initialize_genesis_epoch(current_slot) - } + let pre_digest = pre_digest.expect("Valid Sassafras block should have a pre-digest. qed"); // let Some(ref pre_digest) = pre_digest { + // + let current_slot = pre_digest.slot; + CurrentSlot::::put(current_slot); - CurrentSlot::::put(current_slot); + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + Self::initialize_genesis_epoch(current_slot) } Initialized::::put(pre_digest); - // enact epoch change, if necessary. - T::EpochChangeTrigger::trigger::(now); - } - - /// Call this function exactly once when an epoch changes, to update the randomness. - /// Returns the new randomness. 
- fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::::get(); - let accumulator = RandomnessAccumulator::::get(); - - let mut s = Vec::with_capacity(2 * this_randomness.len() + 8); - s.extend_from_slice(&this_randomness); - s.extend_from_slice(&next_epoch_index.to_le_bytes()); - s.extend_from_slice(&accumulator); - - let next_randomness = sp_io::hashing::blake2_256(&s); - NextRandomness::::put(&next_randomness); + // TODO-SASS-P2: incremental parial ordering for NextTickets - this_randomness + // Enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now); } - /// Fetch expected ticket for the given slot. - // TODO-SASS-P2: This is a very inefficient and temporary solution. - // On refactory we will come up with a better solution (like a scattered vector). + /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ , n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the `Tickets` + /// list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the `NextTickets` + /// list. Note that in this case we may have not finished receiving all the tickets for that + /// epoch yet. The next epoch tickets should be considered "stable" only after the current + /// epoch first half (see the [`submit_tickets_unsigned_extrinsic`]). 
+ /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. pub fn slot_ticket(slot: Slot) -> Option { + let epoch_idx = EpochIndex::::get(); let duration = T::EpochDuration::get(); - let slot_idx = Self::slot_epoch_index(slot); // % duration; + let mut slot_idx = Self::slot_epoch_index(slot); + let mut tickets_meta = TicketsMeta::::get(); - // Given a list of ordered tickets: t0, t1, t2, ..., tk to be assigned to N slots (N>k) - // The tickets are assigned to the slots in the following order: t1, t3, ..., t4, t2, t0. - - let ticket_index = |slot_idx| { + let get_ticket_idx = |slot_idx| { let ticket_idx = if slot_idx < duration / 2 { 2 * slot_idx + 1 } else { 2 * (duration - (slot_idx + 1)) }; - log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); - ticket_idx as usize + log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); + ticket_idx as u32 + }; + + let mut epoch_key = (epoch_idx & 1) as u8; + + if duration <= slot_idx && slot_idx < 2 * duration { + // Try to get a ticket for the next epoch. Since its state values were not enacted yet, + // we may have to finish sorting the tickets. + epoch_key ^= 1; + slot_idx -= duration; + if tickets_meta.segments_count != 0 { + Self::sort_tickets(tickets_meta.segments_count, epoch_key, &mut tickets_meta); + TicketsMeta::::set(tickets_meta.clone()); + } + } else if slot_idx >= 2 * duration { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < tickets_meta.tickets_count[epoch_key as usize] { + Tickets::::get((epoch_key, ticket_idx)) + } else { + None + } + } + + // Sort the tickets that belong to at most `max_iter` segments starting from the last. 
+ // If the `max_iter` value is equal to the number of segments then the result is truncated + // and saved as the tickets associated to `epoch_key`. + // Else the result is saved within the structure itself to be used on next iterations. + fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { + let mut segments_count = metadata.segments_count; + let max_iter = max_iter.min(segments_count); + let max_tickets = T::MaxTickets::get() as usize; + + let mut new_segment = if metadata.sort_started { + NextTicketsSegments::::take(u32::MAX).into_inner() + } else { + Vec::new() }; - // If this is a ticket for an epoch not enacted yet we have to fetch it from the - // `NextTickets` list. For example, this may happen when an author request the first - // ticket of a new epoch. - if slot_idx < duration { - let tickets = Tickets::::get(); - let idx = ticket_index(slot_idx); - tickets.get(idx).cloned() + let mut require_sort = max_iter != 0; + + let mut sup = if new_segment.len() >= max_tickets { + new_segment[new_segment.len() - 1] + } else { + Ticket::try_from([0xFF; 32]).expect("This is a valid ticket value; qed") + }; + + for _ in 0..max_iter { + let segment = NextTicketsSegments::::take(segments_count); + + segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); + if new_segment.len() > max_tickets { + require_sort = false; + new_segment.sort_unstable(); + new_segment.truncate(max_tickets); + sup = new_segment[new_segment.len() - 1]; + } + + segments_count -= 1; + } + + if require_sort { + new_segment.sort_unstable(); + } + + if segments_count == 0 { + // Sort is over, write to the map. + // TODO-SASS-P2: is there a better way to write a map from a vector? 
+ new_segment.iter().enumerate().for_each(|(i, t)| { + Tickets::::insert((epoch_key, i as u32), t); + }); + metadata.tickets_count[epoch_key as usize] = new_segment.len() as u32; } else { - let tickets = NextTickets::::get(); - // Do not use modulus since we want to eventually return `None` for slots crossing the - // epoch boundaries. - let idx = ticket_index(slot_idx - duration); - tickets.iter().nth(idx).cloned() + NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); + metadata.sort_started = true; } + + metadata.segments_count = segments_count; } /// Submit next epoch validator tickets via an unsigned extrinsic. - pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has + /// is called within the first half of the epoch. That is, tickets received within the + /// second half are dropped. + /// TODO-SASS-P3: we have to add the zk validity proofs + pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); + tickets.sort_unstable(); + let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; SubmitTransaction::>::submit_unsigned_transaction(call.into()).is_ok() } } +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); +} + +/// A type signifying to Sassafras that an external trigger for epoch changes +/// (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. 
+} + +/// A type signifying to Sassafras that it should perform epoch changes with an internal +/// trigger, recycling the same authorities forever. +pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: T::BlockNumber) { + if >::should_end_session(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_session_change(authorities, next_authorities); + } + } +} + impl BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs new file mode 100644 index 0000000000000..25ef4f61fb881 --- /dev/null +++ b/frame/sassafras/src/mock.rs @@ -0,0 +1,231 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras pallet. 
+ +use crate::{self as pallet_sassafras, SameAuthoritiesForever}; + +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}, +}; +use scale_codec::Encode; +use sp_consensus_sassafras::{ + digests::PreDigest, + vrf::{self, VRFOutput, VRFProof}, + AuthorityIndex, AuthorityPair, Slot, +}; +use sp_core::{ + crypto::{IsWrappedBy, Pair}, + H256, U256, +}; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, TestXt}, + traits::IdentityLookup, +}; + +const EPOCH_DURATION: u64 = 10; +const MAX_TICKETS: u32 = 6; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +type DummyValidatorId = u64; + +type AccountData = u128; + +parameter_types! { + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} + +impl frame_system::Config for Test { + type Event = Event; + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Version = (); + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = DummyValidatorId; + type Lookup = IdentityLookup; + type Header = Header; + type BlockHashCount = ConstU64<250>; + type PalletInfo = PalletInfo; + type AccountData = AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); //Sassafras; + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = TestXt; +} + +impl 
pallet_sassafras::Config for Test { + type EpochDuration = ConstU64; + type ExpectedBlockTime = ConstU64<1>; + type EpochChangeTrigger = SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32; +} + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len).1 +} + +pub fn new_test_ext_with_pairs( + authorities_len: usize, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities = pairs.iter().map(|p| (p.public(), 1)).collect(); + + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let config = pallet_sassafras::GenesisConfig { authorities, epoch_config: Default::default() }; + >::assimilate_storage(&config, &mut t) + .unwrap(); + + (pairs, t.into()) +} + +fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization + let epoch_start = Sassafras::current_epoch_start(); + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); + } + + let transcript = vrf::make_ticket_transcript(&randomness, attempt, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { 
+ (0..attempts) + .into_iter() + .map(|attempt| make_ticket_vrf(slot, attempt, pair)) + .collect() +} + +fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { + let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); + + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization + let epoch_start = Sassafras::current_epoch_start(); + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); + } + + let transcript = vrf::make_slot_transcript(&randomness, slot, epoch); + let inout = pair.vrf_sign(transcript); + let output = VRFOutput(inout.0.to_output()); + let proof = VRFProof(inout.1); + + (output, proof) +} + +pub fn make_pre_digest( + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> PreDigest { + let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); + PreDigest { authority_index, slot, vrf_output, vrf_proof, ticket_info: None } +} + +pub fn make_wrapped_pre_digest( + authority_index: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> Digest { + let pre_digest = make_pre_digest(authority_index, slot, pair); + let log = + DigestItem::PreRuntime(sp_consensus_sassafras::SASSAFRAS_ENGINE_ID, pre_digest.encode()); + Digest { logs: vec![log] } +} + +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_wrapped_pre_digest(0, slot, pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Slots will grow accordingly to blocks +pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; + let mut digest 
= None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot = slot + 1; + } + digest +} diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs new file mode 100644 index 0000000000000..15cdab95d8887 --- /dev/null +++ b/frame/sassafras/src/session.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras implementation of traits required by session pallet. + +use super::*; +use frame_support::traits::{EstimateNextSessionRotation, OneSessionHandler}; +use pallet_session::ShouldEndSession; +use sp_runtime::{traits::SaturatedConversion, Permill}; + +impl ShouldEndSession for Pallet { + fn should_end_session(now: T::BlockNumber) -> bool { + // It might be (and it is in current implementation) that session module is calling + // `should_end_session` from it's own `on_initialize` handler, in which case it's + // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we + // have initialized the pallet and updated the current slot. 
+ Self::initialize(now); + Self::should_end_session(now) + } +} + +impl OneSessionHandler for Pallet { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_genesis_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + Self::enact_session_change(bounded_authorities, next_bounded_authorities) + } + + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) + } +} + +impl EstimateNextSessionRotation for Pallet { + fn average_session_length() -> T::BlockNumber { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; + let progress = Permill::from_rational(*elapsed, T::EpochDuration::get()); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (Some(progress), T::DbWeight::get().reads(3)) + } + + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. 
+ /// + /// Returns None if the prediction is in the past; This implies an internal error and should + /// not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // + // ## IMPORTANT NOTE + // + // This implementation is linked to how [`should_session_change`] is working. This might need + // to be updated accordingly, if the underlying mechanics of slot and epochs change. + fn estimate_next_session_rotation(now: T::BlockNumber) -> (Option, Weight) { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + let upper_bound = next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (upper_bound, T::DbWeight::get().reads(3)) + } +} diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs new file mode 100644 index 0000000000000..3eadff59cdd6f --- /dev/null +++ b/frame/sassafras/src/tests.rs @@ -0,0 +1,414 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras pallet. + +use crate::*; +use mock::*; + +use frame_support::traits::{OnFinalize, OnInitialize}; +use hex_literal::hex; +use sp_consensus_sassafras::Slot; +use sp_runtime::traits::Get; + +#[test] +fn slot_ticket_fetch() { + let max_tickets: u32 = ::MaxTickets::get(); + assert_eq!(max_tickets, 6); + + let curr_tickets: Vec = (0..max_tickets as u8) + .into_iter() + .map(|i| [i; 32].try_into().unwrap()) + .collect(); + + let next_tickets: Vec = (0..(max_tickets - 1) as u8) + .into_iter() + .map(|i| [max_tickets as u8 + i; 32].try_into().unwrap()) + .collect(); + + new_test_ext(4).execute_with(|| { + curr_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((0, i as u32), ticket); + }); + next_tickets.iter().enumerate().for_each(|(i, ticket)| { + Tickets::::insert((1, i as u32), ticket); + }); + TicketsMeta::::set(TicketsMetadata { + tickets_count: [max_tickets, max_tickets - 1], + segments_count: 0, + sort_started: false, + }); + + // Test next session tickets fetch + assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(1.into()), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket(2.into()), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket(3.into()), None); + assert_eq!(Sassafras::slot_ticket(4.into()), None); + assert_eq!(Sassafras::slot_ticket(5.into()), None); + assert_eq!(Sassafras::slot_ticket(6.into()), None); + assert_eq!(Sassafras::slot_ticket(7.into()), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket(8.into()), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket(9.into()), Some(curr_tickets[0])); + + // Test next session tickets fetch + assert_eq!(Sassafras::slot_ticket(10.into()), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket(11.into()), Some(next_tickets[3])); + 
assert_eq!(Sassafras::slot_ticket(12.into()), None); //Some(next_tickets[5])); + assert_eq!(Sassafras::slot_ticket(13.into()), None); + assert_eq!(Sassafras::slot_ticket(14.into()), None); + assert_eq!(Sassafras::slot_ticket(15.into()), None); + assert_eq!(Sassafras::slot_ticket(16.into()), None); + assert_eq!(Sassafras::slot_ticket(17.into()), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket(18.into()), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket(19.into()), Some(next_tickets[0])); + + // Test fetch beyend next session + assert_eq!(Sassafras::slot_ticket(20.into()), None); + assert_eq!(Sassafras::slot_ticket(42.into()), None); + }); +} + +#[test] +fn genesis_values() { + new_test_ext(4).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 4); + assert_eq!(EpochConfig::::get(), Default::default()); + }); +} + +#[test] +fn on_first_block_after_genesis() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!(RandomnessAccumulator::::get(), [0; 32]); + + Sassafras::on_finalize(1); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 
0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn on_normal_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We don't want to trigger an epoch change in this test. 
+ let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + ); + + Sassafras::on_finalize(2); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("ea16f22af4afe5bfb8e3be3e257c3a88ae0c2406a4afc067871b6e5a7ae8756e"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], digest.logs[0]); + }); +} + +#[test] +fn epoch_change_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We want to trigger an epoch change in this test. 
+ let epoch_duration: u64 = ::EpochDuration::get(); + let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32],); + assert_eq!( + NextRandomness::::get(), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("ec9f2acd75e3a901b3a3fad95267a275af1aded3df8bebebb8d14ebd2190ce59"), + ); + + Sassafras::on_finalize(start_block + epoch_duration); + let header = System::finalize(); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!( + NextRandomness::::get(), + hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + ); + assert_eq!( + RandomnessAccumulator::::get(), + hex!("d017578d6bad1856315866ce1ef845c2584873fcbc011db7dcb99f1f19baa6f3"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: None, + }, 
+ ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn submit_enact_claim_tickets() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + let _digest = progress_to_block(2, &pairs[0]).unwrap(); + + // Check state before tickets submission + assert!(Tickets::::iter().next().is_none()); + + // Submit authoring tickets in three different batches. + // We can ignore the threshold since we are not passing through the unsigned extrinsic + // validation. 
+ let mut tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets0).unwrap(); + let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets1).unwrap(); + let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(Origin::none(), tickets2).unwrap(); + + tickets.sort(); + tickets.truncate(max_tickets as usize); + let expected_tickets = tickets; + + // Check state after submit + let meta = TicketsMeta::::get(); + assert!(Tickets::::iter().next().is_none()); + assert_eq!(meta.segments_count, 3); + assert_eq!(meta.tickets_count, [0, 0]); + + // Process up to the last epoch slot (do not enact epoch change) + let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); + + // TODO-SASS-P2: at this point next tickets should have been sorted + //assert_eq!(NextTicketsSegmentsCount::::get(), 0); + //assert!(Tickets::::iter().next().is_some()); + + // Check if we can claim next epoch tickets in outside-in fashion. 
+ let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 3).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 4).is_none()); + assert!(Sassafras::slot_ticket(slot + 7).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 10).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 11).is_none()); + + // Enact session change by progressing one more block + + let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); + + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket(slot).unwrap(), expected_tickets[1]); + assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[3]); + assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[5]); + assert!(Sassafras::slot_ticket(slot + 3).is_none()); + assert!(Sassafras::slot_ticket(slot + 6).is_none()); + assert_eq!(Sassafras::slot_ticket(slot + 7).unwrap(), expected_tickets[4]); + assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[2]); + assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[0]); + assert!(Sassafras::slot_ticket(slot + 10).is_none()); + }); +} + +#[test] +fn block_skips_epochs() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let epoch_duration: u64 = ::EpochDuration::get(); + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + 
Sassafras::on_initialize(start_block); + + let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]) + .into_iter() + .map(|(output, _)| output) + .collect(); + Sassafras::submit_tickets(Origin::none(), BoundedVec::truncate_from(tickets.clone())) + .unwrap(); + + // Force enact of next tickets + assert_eq!(TicketsMeta::::get().segments_count, 1); + Sassafras::slot_ticket(start_slot + epoch_duration).unwrap(); + assert_eq!(TicketsMeta::::get().segments_count, 0); + + let next_random = NextRandomness::::get(); + + // We want to trigger an skip epoch in this test. + let offset = 3 * epoch_duration; + let _digest = go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + offset); + assert_eq!(Sassafras::epoch_index(), 3); + assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); + assert_eq!(Sassafras::current_slot_epoch_index(), 0); + + // Tickets were discarded + let meta = TicketsMeta::::get(); + assert_eq!(meta, TicketsMetadata::default()); + // We've used the last known next epoch randomness as a fallback + assert_eq!(next_random, Sassafras::randomness()); + }); +} diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 68116c6b91f70..eb318a5caa379 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Private implementation details of Sassafras digests. 
use super::{ - AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, Slot, TicketInfo, - SASSAFRAS_ENGINE_ID, + AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, Slot, TicketInfo, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -36,22 +36,24 @@ pub struct PreDigest { pub authority_index: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, - /// Block VRF output. - pub block_vrf_output: VRFOutput, - /// Block VRF proof. - pub block_vrf_proof: VRFProof, + /// Slot VRF output. + pub vrf_output: VRFOutput, + /// Slot VRF proof. + pub vrf_proof: VRFProof, /// Ticket information. pub ticket_info: Option, } /// Information about the next epoch. This is broadcast in the first block /// of the epoch. -#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { /// The authorities. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// The value of randomness to use for the slot-assignment. pub randomness: Randomness, + /// Algorithm parameters. If not present, previous epoch parameters are used. + pub config: Option, } /// An consensus log item for BABE. 
diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 0546c99c52984..4754081fbc126 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -22,42 +22,35 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -pub use merlin::Transcript; - use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; +use sp_core::{crypto, U256}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + PublicKey, Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, + VRF_PROOF_LENGTH, }; -/// Key type for Sassafras module. -pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; - pub mod digests; pub mod inherents; +pub mod vrf; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; app_crypto!(sr25519, SASSAFRAS); } +/// Key type for Sassafras protocol. +pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + /// The index of an authority. pub type AuthorityIndex = u32; -/// The prefix used by Sassafras for its ticket VRF keys. -pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf"; - -/// The prefix used by Sassafras for its post-block VRF keys. -pub const SASSAFRAS_BLOCK_VRF_PREFIX: &[u8] = b"substrate-sassafras-block-vrf"; - /// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Sassafras module. If that ever changes, then this must, too. 
#[cfg(feature = "std")] @@ -87,25 +80,34 @@ pub type SassafrasBlockWeight = u32; /// Configuration data used by the Sassafras consensus engine. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] -pub struct SassafrasGenesisConfiguration { - /// The slot duration in milliseconds for Sassafras. +pub struct SassafrasConfiguration { + /// The slot duration in milliseconds. pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: u64, - /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// The randomness for the genesis epoch. + pub epoch_duration: u64, + /// The authorities for the epoch. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// The randomness for the epoch. pub randomness: Randomness, + /// Tickets threshold parameters. + pub threshold_params: SassafrasEpochConfiguration, +} + +impl SassafrasConfiguration { + /// Get the slot duration defined in the genesis configuration. + pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.slot_duration) + } } /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct SassafrasEpochConfiguration { - // TODO-SASS-P2 - // x: redundancy_factor - // a: attempts number - // L: bound on aa number of tickets that can be gossiped + /// Redundancy factor. + pub redundancy_factor: u32, + /// Number of attempts for tickets generation. + pub attempts_number: u32, } /// Ticket type. @@ -122,65 +124,36 @@ pub struct TicketInfo { pub proof: VRFProof, } -/// Make slot VRF transcript. 
-pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_u64(b"slot number", *slot); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", &randomness[..]); - transcript +/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: +/// - x: redundancy factor; +/// - s: number of slots in epoch; +/// - a: max number of attempts; +/// - v: number of validator in epoch. +/// The parameters should be chosen such that T <= 1. +/// If `attempts * validators` is zero then we fallback to T = 0 +// TODO-SASS-P3: this formula must be double-checked... +#[inline] +pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> U256 { + let den = attempts as u64 * validators as u64; + let num = redundancy as u64 * slots as u64; + U256::max_value() + .checked_div(den.into()) + .unwrap_or(U256::zero()) + .saturating_mul(num.into()) } -/// Make slot VRF transcript data container. -#[cfg(feature = "std")] -pub fn make_slot_transcript_data( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - ("slot number", VRFTranscriptValue::U64(*slot)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} - -/// Make ticket VRF transcript. -pub fn make_ticket_transcript(randomness: &[u8], attempt: u64, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(b"type", b"ticket"); - transcript.append_u64(b"attempt", attempt); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", randomness); - transcript -} - -/// Make ticket VRF transcript data container. 
-#[cfg(feature = "std")] -pub fn make_ticket_transcript_data( - randomness: &[u8], - attempt: u64, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - ("type", VRFTranscriptValue::Bytes(b"ticket".to_vec())), - ("attempt", VRFTranscriptValue::U64(attempt)), - ("current epoch", VRFTranscriptValue::U64(epoch)), - ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } +/// Returns true if the given VRF output is lower than the given threshold, false otherwise. +#[inline] +pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { + U256::from(ticket.as_bytes()) < threshold } +// Runtime API. sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. - fn configuration() -> SassafrasGenesisConfiguration; + fn configuration() -> SassafrasConfiguration; /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..1c46fe77a6c6e --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives related to VRF input and output. + +pub use merlin::Transcript; + +pub use sp_consensus_slots::Slot; +pub use sp_consensus_vrf::schnorrkel::{PublicKey, Randomness, VRFOutput, VRFProof}; +#[cfg(feature = "std")] +use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; + +use crate::SASSAFRAS_ENGINE_ID; + +const TYPE_LABEL: &str = "type"; +const EPOCH_LABEL: &str = "epoch"; +const SLOT_LABEL: &str = "slot"; +const ATTEMPT_LABEL: &str = "slot"; +const RANDOMNESS_LABEL: &str = "randomness"; + +const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; +const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; + +/// Make slot VRF transcript. +pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make slot VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_slot_transcript_data( + randomness: &Randomness, + slot: Slot, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), + (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} + +/// Make ticket VRF transcript. 
+pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); + transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); + transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); + transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); + transcript +} + +/// Make ticket VRF transcript data container. +#[cfg(feature = "std")] +pub fn make_ticket_transcript_data( + randomness: &Randomness, + attempt: u32, + epoch: u64, +) -> VRFTranscriptData { + VRFTranscriptData { + label: &SASSAFRAS_ENGINE_ID, + items: vec![ + (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), + (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), + (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), + (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), + ], + } +} From a7fa9402327f90dc98d901d701fa2a4d67a9a5f6 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 23 Aug 2022 19:21:37 +0200 Subject: [PATCH 03/62] Fix obsolete dependencies --- Cargo.lock | 1 - bin/node-sassafras/node/Cargo.toml | 2 +- client/consensus/sassafras/Cargo.toml | 1 - client/consensus/sassafras/src/authorship.rs | 21 ++++++++++---------- client/consensus/sassafras/src/lib.rs | 1 - 5 files changed, 11 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c435a6090c382..3530b07a222f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8230,7 +8230,6 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.0", - "retain_mut", "sc-client-api", "sc-consensus", "sc-consensus-epochs", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 9133c2141c837..8587b0462a03f 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -41,7 +41,7 @@ frame-system = { version = "4.0.0-dev", path = 
"../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.14.0", features = ["server"] } +jsonrpsee = { version = "0.15.1", features = ["server"] } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 51400bd66721d..888959090b31a 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -19,7 +19,6 @@ scale-codec = { package = "parity-scale-codec", version = "3.0.0", features = [" futures = "0.3.21" log = "0.4.16" parking_lot = "0.12.0" -retain_mut = "0.1.4" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 8f1aa1115d2a5..32076eb7c34c1 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -247,17 +247,16 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { - RetainMut::retain_mut(&mut *self.slot_notification_sinks.lock(), |sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => - if e.is_full() { - warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); - true - } else { - false - }, - } + let sinks = &mut self.slot_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: 
"sassafras", "🌳 Trying to notify a slot but the channel is full"); + true + } else { + false + }, }); } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index d81b8788fbae9..9cfbeff728970 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -40,7 +40,6 @@ use futures::{ use log::{debug, error, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use retain_mut::RetainMut; use scale_codec::{Decode, Encode}; use schnorrkel::SignatureError; From b0a218ccefbeb396f78e5a37c6aa73c9ed3d23cb Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 2 Sep 2022 14:18:08 +0200 Subject: [PATCH 04/62] Fix slot to epoch index conversion --- Cargo.lock | 2 +- bin/node-sassafras/runtime/src/lib.rs | 2 +- client/consensus/sassafras/src/authorship.rs | 2 +- frame/sassafras/src/lib.rs | 149 ++++++++----------- frame/sassafras/src/mock.rs | 10 +- frame/sassafras/src/session.rs | 4 +- frame/sassafras/src/tests.rs | 67 +++++---- 7 files changed, 105 insertions(+), 131 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc4a2db9bc2d6..1398936c64c2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8255,7 +8255,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sc-consensus", "sc-consensus-epochs", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c428931e99dbe..77d176f03732a 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -149,7 +149,7 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. 
pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults(2_u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 32076eb7c34c1..5d277bc9e317c 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -223,7 +223,7 @@ where let block_id = BlockId::Hash(parent_header.hash()); let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; - // TODO-SASS-P2 + // TODO-SASS-P2: remove me debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); let claim = authorship::claim_slot( diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 31678a6199ec7..a87b248526390 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -78,13 +78,11 @@ pub use pallet::*; /// Tickets related metadata that is commonly used together. #[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] pub struct TicketsMetadata { - /// Number of tickets available for even and odd session indices respectivelly. + /// Number of tickets available for even and odd sessions, respectivelly. /// I.e. the index is computed as session-index modulo 2. pub tickets_count: [u32; 2], /// Number of tickets segments pub segments_count: u32, - /// Last segment has been already sorted - pub sort_started: bool, } #[frame_support::pallet] @@ -155,7 +153,7 @@ pub mod pallet { ValueQuery, >; - /// Next epoch authorities. + /// Next session authorities. 
#[pallet::storage] pub type NextAuthorities = StorageValue< _, @@ -163,8 +161,8 @@ pub mod pallet { ValueQuery, >; - /// The slot at which the first epoch actually started. This is 0 - /// until the first block of the chain. + /// The slot at which the first session started. + /// This is `None` until the first block is imported on chain. #[pallet::storage] #[pallet::getter(fn genesis_slot)] pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; @@ -174,21 +172,12 @@ pub mod pallet { #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// The epoch randomness for the *current* epoch. - /// - /// # Security - /// - /// This MUST NOT be used for gambling, as it can be influenced by a - /// malicious validator in the short term. It MAY be used in many - /// cryptographic protocols, however, so long as one remembers that this - /// (like everything else on-chain) it is public. For example, it can be - /// used where a number is needed that cannot have been chosen by an - /// adversary, for purposes such as public-coin zero-knowledge proofs. + /// Current session randomness. #[pallet::storage] #[pallet::getter(fn randomness)] pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; - /// Next epoch randomness. + /// Next session randomness. #[pallet::storage] pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; @@ -213,9 +202,7 @@ pub mod pallet { /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next /// epoch is enacted. - /// TODO-SASS-P2: better doc? Double check if next epoch tickets were computed using NextEpoch - /// params in the native ecode. - /// In other words a config change submitted during session N will be enacted on session N+2. + /// In other words, a config change submitted during session N will be enacted on session N+2. 
/// This is to maintain coherence for already submitted tickets for epoch N+1 that where /// computed using configuration parameters stored for session N+1. #[pallet::storage] @@ -232,12 +219,8 @@ pub mod pallet { #[pallet::storage] pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>; - // /// Next session tickets temporary accumulator length. - // #[pallet::storage] - // pub type NextTicketsSegmentsCount = StorageValue<_, u32, ValueQuery>; - /// Next session tickets temporary accumulator. - /// Special u32::MAX key is reserved for partially sorted segment. + /// Special `u32::MAX` key is reserved for partially sorted segment. #[pallet::storage] pub type NextTicketsSegments = StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; @@ -264,8 +247,44 @@ pub mod pallet { impl Hooks> for Pallet { /// Block initialization fn on_initialize(now: BlockNumberFor) -> Weight { - Self::initialize(now); - 0 + // Since `initialize` can be called twice (e.g. if session pallet is used) + // let's ensure that we only do the initialization once per block. + if Self::initialized().is_some() { + return Weight::zero() + } + + let pre_digest = >::digest() + .logs + .iter() + .filter_map(|s| { + s.as_pre_runtime().and_then(|(id, mut data)| { + if id == SASSAFRAS_ENGINE_ID { + PreDigest::decode(&mut data).ok() + } else { + None + } + }) + }) + .next() + .expect("Valid Sassafras block should have a pre-digest. qed"); + + CurrentSlot::::put(pre_digest.slot); + + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + log::debug!(target: "sassafras", "🌳 >>> GENESIS SLOT: {:?}", pre_digest.slot); + Self::initialize_genesis_epoch(pre_digest.slot) + } + + Initialized::::put(pre_digest); + + // TODO-SASS-P3: incremental partial ordering for Next epoch tickets. + + // Enact session change, if necessary. 
+ T::EpochChangeTrigger::trigger::(now); + + Weight::zero() } /// Block finalization @@ -392,7 +411,7 @@ pub mod pallet { // TODO-SASS-P2: if possible use a more efficient way to distinquish // duplicates... .and_provides(tickets) - // TODO-SASS-P2: this sholot_tld be set such that it is discarded after the + // TODO-SASS-P2: this should be set such that it is discarded after the // first half .longevity(3_u64) .propagate(true) @@ -424,11 +443,6 @@ impl Pallet { // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having // started at the slot of block 1. We want to use the same randomness and validator set as // signalled in the genesis, so we don't rotate the epoch. - - // TODO-SASS-P2 - // Is now != One required??? - // What if we want epochs with len = 1. In this case we doesn't change epoch correctly - // in slot 1. now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() } @@ -437,11 +451,10 @@ impl Pallet { } fn slot_epoch_index(slot: Slot) -> u64 { - // TODO-SASS-P2 : is this required? - // if *GenesisSlot::::get() == 0 { - // return 0 - // } - *slot.saturating_sub(Self::current_epoch_start()) + if *GenesisSlot::::get() == 0 { + return 0 + } + slot.checked_sub(Self::current_epoch_start().into()).unwrap_or(u64::MAX) } /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_session` @@ -513,7 +526,7 @@ impl Pallet { let mut tickets_metadata = TicketsMeta::::get(); // Optionally finish sorting if tickets_metadata.segments_count != 0 { - Self::sort_tickets(u32::MAX, epoch_key, &mut tickets_metadata); + Self::sort_tickets(tickets_metadata.segments_count, epoch_key, &mut tickets_metadata); } // Clear the prev (equal to the next) epoch tickets counter. let next_epoch_key = epoch_key ^ 1; @@ -600,46 +613,6 @@ impl Pallet { Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } - fn initialize(now: T::BlockNumber) { - // Since `initialize` can be called twice (e.g. 
if session module is present) - // let's ensure that we only do the initialization once per block. - // TODO-SASS-P2: why session calls initialize? - if Self::initialized().is_some() { - return - } - - let pre_digest = >::digest() - .logs - .iter() - .filter_map(|s| s.as_pre_runtime()) - .filter_map(|(id, mut data)| { - if id == SASSAFRAS_ENGINE_ID { - PreDigest::decode(&mut data).ok() - } else { - None - } - }) - .next(); - - let pre_digest = pre_digest.expect("Valid Sassafras block should have a pre-digest. qed"); // let Some(ref pre_digest) = pre_digest { - // - let current_slot = pre_digest.slot; - CurrentSlot::::put(current_slot); - - // On the first non-zero block (i.e. block #1) this is where the first epoch - // (epoch #0) actually starts. We need to adjust internal storage accordingly. - if *GenesisSlot::::get() == 0 { - Self::initialize_genesis_epoch(current_slot) - } - - Initialized::::put(pre_digest); - - // TODO-SASS-P2: incremental parial ordering for NextTickets - - // Enact epoch change, if necessary. - T::EpochChangeTrigger::trigger::(now); - } - /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. /// /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, @@ -701,20 +674,17 @@ impl Pallet { } } - // Sort the tickets that belong to at most `max_iter` segments starting from the last. - // If the `max_iter` value is equal to the number of segments then the result is truncated - // and saved as the tickets associated to `epoch_key`. - // Else the result is saved within the structure itself to be used on next iterations. + // Lexicographically sort the tickets that belong to the next epoch. + // The tickets are fetched from at most `max_iter` segments received via the `submit_tickets` + // extrinsic. The resulting sorted vector is truncated and if all the segments were sorted + // it is saved as the next session tickets. 
+ // Else the result is saved to be used by next calls. fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { let mut segments_count = metadata.segments_count; let max_iter = max_iter.min(segments_count); let max_tickets = T::MaxTickets::get() as usize; - let mut new_segment = if metadata.sort_started { - NextTicketsSegments::::take(u32::MAX).into_inner() - } else { - Vec::new() - }; + let mut new_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); let mut require_sort = max_iter != 0; @@ -744,14 +714,13 @@ impl Pallet { if segments_count == 0 { // Sort is over, write to the map. - // TODO-SASS-P2: is there a better way to write a map from a vector? + // TODO-SASS-P3: is there a better way to write a map from a vector? new_segment.iter().enumerate().for_each(|(i, t)| { Tickets::::insert((epoch_key, i as u32), t); }); metadata.tickets_count[epoch_key as usize] = new_segment.len() as u32; } else { NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); - metadata.sort_started = true; } metadata.segments_count = segments_count; diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 25ef4f61fb881..9a247cc1d1496 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -19,10 +19,7 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever}; -use frame_support::{ - parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}, -}; +use frame_support::traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ digests::PreDigest, @@ -48,11 +45,6 @@ type DummyValidatorId = u64; type AccountData = u128; -parameter_types! 
{ - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); -} - impl frame_system::Config for Test { type Event = Event; type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs index 15cdab95d8887..bfe4e1c79b968 100644 --- a/frame/sassafras/src/session.rs +++ b/frame/sassafras/src/session.rs @@ -18,7 +18,7 @@ //! Sassafras implementation of traits required by session pallet. use super::*; -use frame_support::traits::{EstimateNextSessionRotation, OneSessionHandler}; +use frame_support::traits::{EstimateNextSessionRotation, Hooks, OneSessionHandler}; use pallet_session::ShouldEndSession; use sp_runtime::{traits::SaturatedConversion, Permill}; @@ -28,7 +28,7 @@ impl ShouldEndSession for Pallet { // `should_end_session` from it's own `on_initialize` handler, in which case it's // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we // have initialized the pallet and updated the current slot. - Self::initialize(now); + Self::on_initialize(now); Self::should_end_session(now) } } diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 3eadff59cdd6f..f957346733137 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -27,6 +27,7 @@ use sp_runtime::traits::Get; #[test] fn slot_ticket_fetch() { + let genesis_slot = Slot::from(100); let max_tickets: u32 = ::MaxTickets::get(); assert_eq!(max_tickets, 6); @@ -50,36 +51,48 @@ fn slot_ticket_fetch() { TicketsMeta::::set(TicketsMetadata { tickets_count: [max_tickets, max_tickets - 1], segments_count: 0, - sort_started: false, }); - // Test next session tickets fetch + // Before initializing `GenesisSlot` value (should return first element of current session) + // This is due to special case hardcoded value. 
assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket(1.into()), Some(curr_tickets[3])); - assert_eq!(Sassafras::slot_ticket(2.into()), Some(curr_tickets[5])); - assert_eq!(Sassafras::slot_ticket(3.into()), None); - assert_eq!(Sassafras::slot_ticket(4.into()), None); - assert_eq!(Sassafras::slot_ticket(5.into()), None); - assert_eq!(Sassafras::slot_ticket(6.into()), None); - assert_eq!(Sassafras::slot_ticket(7.into()), Some(curr_tickets[4])); - assert_eq!(Sassafras::slot_ticket(8.into()), Some(curr_tickets[2])); - assert_eq!(Sassafras::slot_ticket(9.into()), Some(curr_tickets[0])); - - // Test next session tickets fetch - assert_eq!(Sassafras::slot_ticket(10.into()), Some(next_tickets[1])); - assert_eq!(Sassafras::slot_ticket(11.into()), Some(next_tickets[3])); - assert_eq!(Sassafras::slot_ticket(12.into()), None); //Some(next_tickets[5])); - assert_eq!(Sassafras::slot_ticket(13.into()), None); - assert_eq!(Sassafras::slot_ticket(14.into()), None); - assert_eq!(Sassafras::slot_ticket(15.into()), None); - assert_eq!(Sassafras::slot_ticket(16.into()), None); - assert_eq!(Sassafras::slot_ticket(17.into()), Some(next_tickets[4])); - assert_eq!(Sassafras::slot_ticket(18.into()), Some(next_tickets[2])); - assert_eq!(Sassafras::slot_ticket(19.into()), Some(next_tickets[0])); - - // Test fetch beyend next session - assert_eq!(Sassafras::slot_ticket(20.into()), None); - assert_eq!(Sassafras::slot_ticket(42.into()), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 100), Some(curr_tickets[1])); + + // Initialize genesis slot value. + GenesisSlot::::set(genesis_slot); + + // Before Current session. + assert_eq!(Sassafras::slot_ticket(0.into()), None); + + // Current session tickets. 
+ assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 2), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 3), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 4), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 5), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 6), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 7), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 8), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 9), Some(curr_tickets[0])); + + // Next session tickets. + assert_eq!(Sassafras::slot_ticket(genesis_slot + 10), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 11), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 12), None); //Some(next_tickets[5])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 13), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 14), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 15), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 16), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 17), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 18), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 19), Some(next_tickets[0])); + + // Beyond next session. 
+ assert_eq!(Sassafras::slot_ticket(genesis_slot + 20), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 42), None); }); } From 2d55cc0fecc8c6c7bc16aa69235f348ecc9d1be4 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 2 Sep 2022 15:39:03 +0200 Subject: [PATCH 05/62] Further refactory --- client/consensus/sassafras/src/authorship.rs | 47 +++++++++---------- .../consensus/sassafras/src/block_import.rs | 3 +- client/consensus/sassafras/src/lib.rs | 18 +++---- .../consensus/sassafras/src/verification.rs | 35 ++++++-------- frame/sassafras/src/lib.rs | 14 +++--- frame/sassafras/src/tests.rs | 14 +++--- primitives/consensus/sassafras/src/digests.rs | 10 ++-- primitives/consensus/sassafras/src/lib.rs | 6 +-- 8 files changed, 70 insertions(+), 77 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 5d277bc9e317c..7801ca8475983 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -23,14 +23,17 @@ use super::*; use sp_consensus_sassafras::{ digests::PreDigest, vrf::{make_slot_transcript_data, make_ticket_transcript_data}, - AuthorityId, Slot, Ticket, TicketInfo, + AuthorityId, Slot, Ticket, TicketAux, }; use sp_core::{twox_64, ByteArray}; /// Get secondary authority index for the given epoch and slot. -pub(crate) fn secondary_authority_index(slot: Slot, config: &SassafrasConfiguration) -> u64 { - u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) % - config.authorities.len() as u64 +pub(crate) fn secondary_authority_index( + slot: Slot, + config: &SassafrasConfiguration, +) -> AuthorityIndex { + u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) as AuthorityIndex % + config.authorities.len() as AuthorityIndex } /// Try to claim an epoch slot. 
@@ -42,14 +45,13 @@ fn claim_slot( keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { let config = &epoch.config; - let (authority_index, ticket_info) = match ticket { + let (authority_idx, ticket_aux) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); - let ticket_info = epoch.tickets_info.get(&ticket)?.clone(); + let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket)?.clone(); log::debug!(target: "sassafras", "🌳 Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", - &ticket.as_bytes()[0..8], ticket_info.authority_index, ticket_info.attempt); - let idx = ticket_info.authority_index as u64; - (idx, Some(ticket_info)) + &ticket.as_bytes()[0..8], authority_idx, ticket_aux.attempt); + (authority_idx, Some(ticket_aux)) }, None => { log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); @@ -57,7 +59,7 @@ fn claim_slot( }, }; - let authority_id = config.authorities.get(authority_index as usize).map(|auth| &auth.0)?; + let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_index); let signature = SyncCryptoStore::sr25519_vrf_sign( @@ -70,19 +72,19 @@ fn claim_slot( .flatten()?; let pre_digest = PreDigest { - authority_index: authority_index as u32, + authority_idx, slot, vrf_output: VRFOutput(signature.output), vrf_proof: VRFProof(signature.proof.clone()), - ticket_info, + ticket_aux, }; Some((pre_digest, authority_id.clone())) } /// Generate the tickets for the given epoch. -/// Tickets additional information (i.e. `TicketInfo`) will be stored within the `Epoch` -/// structure. The additional information will be used during epoch to claim slots. +/// Tickets additional information will be stored within the `Epoch` structure. +/// The additional information will be used later during session to claim slots. 
pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; @@ -99,7 +101,7 @@ pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) log::debug!(target: "sassafras", "🌳 Tickets threshold: {:032x}", threshold); let authorities = config.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); - for (authority_index, authority_id) in authorities { + for (authority_idx, authority_id) in authorities { if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) { continue @@ -124,19 +126,16 @@ pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) return None } - let ticket_info = TicketInfo { - attempt: attempt as u32, - authority_index: authority_index as u32, - proof: VRFProof(signature.proof), - }; + let ticket_aux = + TicketAux { attempt: attempt as u32, proof: VRFProof(signature.proof) }; - Some((ticket, ticket_info)) + Some((ticket, ticket_aux)) }; for attempt in 0..max_attempts { - if let Some((ticket, ticket_info)) = make_ticket(attempt) { + if let Some((ticket, ticket_aux)) = make_ticket(attempt) { tickets.push(ticket); - epoch.tickets_info.insert(ticket, ticket_info); + epoch.tickets_aux.insert(ticket, (authority_idx as AuthorityIndex, ticket_aux)); } } } @@ -420,7 +419,7 @@ async fn tickets_worker( epoch_changes .shared_data() .epoch_mut(&epoch_identifier) - .map(|epoch| epoch.tickets_info.clear()); + .map(|epoch| epoch.tickets_aux.clear()); } } } diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 3630589aeb46a..9f04314df1bca 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -143,8 +143,7 @@ where let epoch_descriptor = intermediate.epoch_descriptor; let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - let 
added_weight = pre_digest.ticket_info.is_some() as u32; - let total_weight = parent_weight + added_weight; + let total_weight = parent_weight + pre_digest.ticket_aux.is_some() as u32; // Search for this all the time so we can reject unexpected announcements. let next_epoch_digest = find_next_epoch_digest::(&block.header) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 9cfbeff728970..7b3b90058c836 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -85,9 +85,9 @@ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, vrf::{make_slot_transcript, make_ticket_transcript}, - AuthorityId, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, - SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, TicketInfo, VRFOutput, VRFProof, - SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, SassafrasApi, + SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, + TicketAux, VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; mod authorship; @@ -194,8 +194,8 @@ pub struct Epoch { pub start_slot: Slot, /// Epoch configuration pub config: SassafrasConfiguration, - /// Tickets metadata. - pub tickets_info: BTreeMap, + /// Tickets auxiliary data. 
+ pub tickets_aux: BTreeMap, } impl EpochT for Epoch { @@ -214,7 +214,7 @@ impl EpochT for Epoch { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + config.epoch_duration, config, - tickets_info: BTreeMap::new(), + tickets_aux: BTreeMap::new(), } } @@ -235,7 +235,7 @@ impl Epoch { epoch_index: 0, start_slot: slot, config: config.clone(), - tickets_info: BTreeMap::new(), + tickets_aux: BTreeMap::new(), } } } @@ -276,11 +276,11 @@ fn find_pre_digest(header: &B::Header) -> Result> let vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); let vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); return Ok(PreDigest { - authority_index: 0, + authority_idx: 0, slot: 0.into(), vrf_output, vrf_proof, - ticket_info: None, + ticket_aux: None, }) } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index b162fe390ef03..ad0c2a0f10053 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -38,9 +38,9 @@ pub struct VerificationParams<'a, B: 'a + BlockT> { } pub struct VerifiedHeaderInfo { + pub authority_id: AuthorityId, pub pre_digest: DigestItem, pub seal: DigestItem, - pub author: AuthorityId, } /// Check a header has been signed by the right key. 
If the slot is too far in @@ -63,8 +63,8 @@ pub fn check_header( return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let author = match config.authorities.get(pre_digest.authority_index as usize) { - Some(author) => author.0.clone(), + let authority_id = match config.authorities.get(pre_digest.authority_idx as usize) { + Some(authority_id) => authority_id.0.clone(), None => return Err(sassafras_err(Error::SlotAuthorNotFound)), }; @@ -80,40 +80,35 @@ pub fn check_header( .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; let pre_hash = header.hash(); - if !AuthorityPair::verify(&signature, &pre_hash, &author) { + if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { return Err(sassafras_err(Error::BadSignature(pre_hash))) } // Check authorship method and claim - match (&ticket, &pre_digest.ticket_info) { - (Some(ticket), Some(ticket_info)) => { + match (&ticket, &pre_digest.ticket_aux) { + (Some(ticket), Some(ticket_aux)) => { log::debug!(target: "sassafras", "🌳 checking primary"); - if ticket_info.authority_index != pre_digest.authority_index { - // TODO-SASS-P2 ... 
we can eventually remove auth index from ticket info - log::error!(target: "sassafras", "🌳 Wrong primary authority index"); - } let transcript = - make_ticket_transcript(&config.randomness, ticket_info.attempt, epoch.epoch_index); - schnorrkel::PublicKey::from_bytes(author.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_info.proof)) + make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_index); + schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) + .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_aux.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { log::debug!(target: "sassafras", "🌳 checking secondary"); let idx = authorship::secondary_authority_index(pre_digest.slot, config); - if idx != pre_digest.authority_index as u64 { - log::error!(target: "sassafras", "🌳 Wrong secondary authority index"); + if idx != pre_digest.authority_idx { + log::error!(target: "sassafras", "🌳 Bad secondary authority index"); + return Err(Error::SlotAuthorNotFound) } }, (Some(_), None) => { log::warn!(target: "sassafras", "🌳 Unexpected secondary authoring mechanism"); - // TODO-SASS-P2: maybe we can use a different error variant return Err(Error::UnexpectedAuthoringMechanism) }, (None, Some(_)) => { log::warn!(target: "sassafras", "🌳 Unexpected primary authoring mechanism"); - // TODO-SASS-P2: maybe we will use a different error variant return Err(Error::UnexpectedAuthoringMechanism) }, } @@ -121,14 +116,14 @@ pub fn check_header( // Check slot-vrf proof let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_index); - schnorrkel::PublicKey::from_bytes(author.as_slice()) + schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; let info = VerifiedHeaderInfo { + authority_id, pre_digest: 
CompatibleDigestItem::sassafras_pre_digest(pre_digest), seal, - author, }; Ok(CheckedHeader::Checked(header, info)) @@ -368,7 +363,7 @@ where slot_now, slot, &block.header, - &verified_info.author, + &verified_info.authority_id, &block.origin, ) .await diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index a87b248526390..c575ef8f33233 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -375,7 +375,7 @@ pub mod pallet { // Current slot should be less than half of epoch duration. let epoch_duration = T::EpochDuration::get(); - if Self::current_slot_epoch_index() >= epoch_duration / 2 { + if Self::current_slot_index() >= epoch_duration / 2 { log::warn!( target: "sassafras::runtime", "🌳 Timeout to propose tickets, bailing out.", @@ -443,14 +443,16 @@ impl Pallet { // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having // started at the slot of block 1. We want to use the same randomness and validator set as // signalled in the genesis, so we don't rotate the epoch. - now != One::one() && Self::current_slot_epoch_index() >= T::EpochDuration::get() + now != One::one() && Self::current_slot_index() >= T::EpochDuration::get() } - fn current_slot_epoch_index() -> u64 { - Self::slot_epoch_index(CurrentSlot::::get()) + /// Current slot index with respect to current epoch. + fn current_slot_index() -> u64 { + Self::slot_index(CurrentSlot::::get()) } - fn slot_epoch_index(slot: Slot) -> u64 { + /// Slot index with respect to current epoch. 
+ fn slot_index(slot: Slot) -> u64 { if *GenesisSlot::::get() == 0 { return 0 } @@ -638,7 +640,7 @@ impl Pallet { pub fn slot_ticket(slot: Slot) -> Option { let epoch_idx = EpochIndex::::get(); let duration = T::EpochDuration::get(); - let mut slot_idx = Self::slot_epoch_index(slot); + let mut slot_idx = Self::slot_index(slot); let mut tickets_meta = TicketsMeta::::get(); let get_ticket_idx = |slot_idx| { diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index f957346733137..7288ff9805e4a 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -123,7 +123,7 @@ fn on_first_block_after_genesis() { assert_eq!(Sassafras::current_slot(), start_slot); assert_eq!(Sassafras::epoch_index(), 0); assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(RandomnessAccumulator::::get(), [0; 32]); @@ -138,7 +138,7 @@ fn on_first_block_after_genesis() { assert_eq!(Sassafras::current_slot(), start_slot); assert_eq!(Sassafras::epoch_index(), 0); assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( @@ -188,7 +188,7 @@ fn on_normal_block() { assert_eq!(Sassafras::current_slot(), start_slot + 1); assert_eq!(Sassafras::epoch_index(), 0); assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::current_slot_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( @@ -206,7 +206,7 @@ fn on_normal_block() { assert_eq!(Sassafras::current_slot(), start_slot + 1); 
assert_eq!(Sassafras::epoch_index(), 0); assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_epoch_index(), 1); + assert_eq!(Sassafras::current_slot_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( @@ -244,7 +244,7 @@ fn epoch_change_block() { assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); assert_eq!(Sassafras::epoch_index(), 1); assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); - assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32],); assert_eq!( NextRandomness::::get(), @@ -265,7 +265,7 @@ fn epoch_change_block() { assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); assert_eq!(Sassafras::epoch_index(), 1); assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); - assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), @@ -416,7 +416,7 @@ fn block_skips_epochs() { assert_eq!(Sassafras::current_slot(), start_slot + offset); assert_eq!(Sassafras::epoch_index(), 3); assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); - assert_eq!(Sassafras::current_slot_epoch_index(), 0); + assert_eq!(Sassafras::current_slot_index(), 0); // Tickets were discarded let meta = TicketsMeta::::get(); diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index eb318a5caa379..1b5fabc144bf4 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -19,7 +19,7 @@ use super::{ AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, Slot, TicketInfo, SASSAFRAS_ENGINE_ID, + SassafrasEpochConfiguration, Slot, 
TicketAux, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -32,16 +32,16 @@ use sp_std::vec::Vec; /// Sassafras primary slot assignment pre-digest. #[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct PreDigest { - /// Validator index. - pub authority_index: AuthorityIndex, + /// Authority index that claimed the slot. + pub authority_idx: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, /// Slot VRF output. pub vrf_output: VRFOutput, /// Slot VRF proof. pub vrf_proof: VRFProof, - /// Ticket information. - pub ticket_info: Option, + /// Ticket auxiliary information for claim check. + pub ticket_aux: Option, } /// Information about the next epoch. This is broadcast in the first block diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 4754081fbc126..56903eb7da7c3 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -113,11 +113,9 @@ pub struct SassafrasEpochConfiguration { /// Ticket type. pub type Ticket = VRFOutput; -/// Ticket information. +/// Ticket auxiliary information. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketInfo { - /// Authority index. - pub authority_index: u32, +pub struct TicketAux { /// Attempt number. pub attempt: u32, /// Ticket proof. 
From ac2fc9cc9aff746fb6420f4790215c9f37e75c8f Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 24 Sep 2022 18:34:35 +0200 Subject: [PATCH 06/62] Fix after merge --- Cargo.lock | 7 +- bin/node-sassafras/node/src/service.rs | 16 ++--- bin/node-sassafras/runtime/Cargo.toml | 2 - bin/node-sassafras/runtime/src/lib.rs | 68 ++++++++----------- client/consensus/sassafras/src/authorship.rs | 29 ++++---- .../consensus/sassafras/src/block_import.rs | 2 +- client/consensus/sassafras/src/lib.rs | 11 +-- .../consensus/sassafras/src/verification.rs | 33 +++------ frame/sassafras/Cargo.toml | 2 +- frame/sassafras/src/mock.rs | 20 +++--- frame/sassafras/src/tests.rs | 13 ++-- 11 files changed, 82 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 479165db4df03..1e3aa107a9981 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2860,6 +2860,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hex_fmt" version = "0.3.0" @@ -4912,7 +4918,6 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", - "hex-literal", "pallet-balances", "pallet-grandpa", "pallet-sassafras", diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 33f66262c6dda..514d2ff71bd1a 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -1,7 +1,7 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{BlockBackend, ExecutorProvider}; +use sc_client_api::BlockBackend; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; @@ -29,7 +29,7 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { } } -pub(crate) type FullClient = +pub type FullClient = sc_service::TFullClient>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; @@ -138,11 +138,10 @@ pub fn new_partial( slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), telemetry.as_ref().map(|x| x.handle()), )?; @@ -161,9 +160,6 @@ pub fn new_partial( } fn remote_keystore(_url: &String) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` Err("Remote Keystore not supported.") } @@ -266,9 +262,6 @@ pub fn new_full(mut config: Configuration) -> Result telemetry.as_ref().map(|x| x.handle()), ); - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let slot_duration = sassafras_link.genesis_config().slot_duration(); let sassafras_config = sc_consensus_sassafras::SassafrasParams { @@ -289,9 +282,8 @@ pub fn new_full(mut config: Configuration) -> Result *timestamp, slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, - can_author_with, }; let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_config)?; diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 823e1dc2bd4eb..14268608af6ea 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -45,7 +45,6 @@ 
pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-fe # Used for runtime benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -hex-literal = { version = "0.3.4", optional = true } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } @@ -84,7 +83,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking", "frame-system/runtime-benchmarks", - "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-sassafras/runtime-benchmarks", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 77d176f03732a..7697be0a3f396 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -138,10 +138,10 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); impl frame_system::offchain::SendTransactionTypes for Runtime where - Call: From, + RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; } parameter_types! { @@ -158,53 +158,28 @@ parameter_types! { // Configure FRAME pallets to include in runtime. impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. type BaseCallFilter = frame_support::traits::Everything; - /// Block & extrinsics weights: base values and limits. type BlockWeights = BlockWeights; - /// The maximum length of a block (in bytes). type BlockLength = BlockLength; - /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. 
- type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type RuntimeCall = RuntimeCall; type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. type Index = Index; - /// The index type for blocks. type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. type Hashing = BlakeTwo256; - /// The header type. type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; - /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// Version of the runtime. type Version = Version; - /// Converts a module to the index of the module in `construct_runtime!`. - /// - /// This type is being generated by `construct_runtime!`. type PalletInfo = PalletInfo; - /// What to do if a new account is created. type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); - /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); - /// This is used as an identifier of the chain. 42 is the generic substrate prefix. type SS58Prefix = SS58Prefix; - /// The set code logic, just the default since we're not a parachain. 
type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -226,8 +201,7 @@ impl pallet_sassafras::Config for Runtime { } impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; type KeyOwnerProofSystem = (); type KeyOwnerProof = >::Proof; @@ -248,7 +222,7 @@ impl pallet_timestamp::Config for Runtime { } impl pallet_balances::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MaxLocks = ConstU32<50>; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; @@ -260,7 +234,7 @@ impl pallet_balances::Config for Runtime { } impl pallet_transaction_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -269,13 +243,13 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; } #[cfg(feature = "use-session-pallet")] impl pallet_session::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; type ValidatorIdOf = (); //pallet_staking::StashOf; type ShouldEndSession = Sassafras; @@ -345,10 +319,11 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -518,6 +493,21 @@ impl_runtime_apis! 
{ } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 7801ca8475983..92e65666918e5 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -27,6 +27,8 @@ use sp_consensus_sassafras::{ }; use sp_core::{twox_64, ByteArray}; +use std::pin::Pin; + /// Get secondary authority index for the given epoch and slot. pub(crate) fn secondary_authority_index( slot: Slot, @@ -297,17 +299,15 @@ where .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; let digest_item = ::sassafras_seal(signature); - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.state_action = + let mut block = BlockImportParams::new(BlockOrigin::Own, header); + block.post_digests.push(digest_item); + block.body = Some(body); + block.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, - ); + block + .insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }); - Ok(import_block) + Ok(block) } fn force_authoring(&self) -> bool { @@ -458,7 +458,7 @@ type SlotNotificationSinks = Arc< >; /// Parameters for 
Sassafras. -pub struct SassafrasParams { +pub struct SassafrasParams { /// The client to use pub client: Arc, /// The keystore that manages the keys of the node. @@ -481,12 +481,10 @@ pub struct SassafrasParams { pub force_authoring: bool, /// The source of timestamps for relative slots pub sassafras_link: SassafrasLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, } /// Start the Sassafras worker. -pub fn start_sassafras( +pub fn start_sassafras( SassafrasParams { client, keystore, @@ -498,8 +496,7 @@ pub fn start_sassafras( create_inherent_data_providers, force_authoring, sassafras_link, - can_author_with, - }: SassafrasParams, + }: SassafrasParams, ) -> Result, sp_consensus::Error> where B: BlockT, @@ -524,7 +521,6 @@ where L: sc_consensus::JustificationSyncLink + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, - CAW: CanAuthorWith + Send + Sync + 'static, ER: std::error::Error + Send + From + From + 'static, { info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); @@ -550,7 +546,6 @@ where sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), sync_oracle, create_inherent_data_providers, - can_author_with, ); let tickets_worker = tickets_worker( diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 9f04314df1bca..555eac3f62638 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -138,7 +138,7 @@ where }; let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; + block.remove_intermediate::>(INTERMEDIATE_KEY)?; let epoch_descriptor = intermediate.epoch_descriptor; let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 
7b3b90058c836..d2ad9f274c9b0 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -24,10 +24,8 @@ #![forbid(unsafe_code, missing_docs)] use std::{ - borrow::Cow, collections::{BTreeMap, HashMap}, future::Future, - pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, @@ -67,8 +65,8 @@ use sp_application_crypto::AppKey; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus::{ - BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, - SelectChain, SyncOracle, + BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain, + SyncOracle, }; use sp_consensus_slots::Slot; use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; @@ -339,7 +337,7 @@ impl SassafrasLink { /// /// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, /// otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( sassafras_link: SassafrasLink, block_import: BI, justification_import: Option>, @@ -348,7 +346,6 @@ pub fn import_queue( create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, - can_author_with: CAW, telemetry: Option, ) -> ClientResult> where @@ -368,7 +365,6 @@ where + Sync + 'static, SelectChain: sp_consensus::SelectChain + 'static, - CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -377,7 +373,6 @@ where select_chain, create_inherent_data_providers, sassafras_link.epoch_changes, - can_author_with, telemetry, sassafras_link.genesis_config, ); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index ad0c2a0f10053..1020df4cd6688 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -130,26 +130,22 @@ pub fn check_header( } /// A verifier for Sassafras blocks. -pub struct SassafrasVerifier { +pub struct SassafrasVerifier { client: Arc, select_chain: SelectChain, create_inherent_data_providers: CIDP, epoch_changes: SharedEpochChanges, - can_author_with: CAW, telemetry: Option, genesis_config: SassafrasConfiguration, } -impl - SassafrasVerifier -{ +impl SassafrasVerifier { /// Constructor. 
pub fn new( client: Arc, select_chain: SelectChain, create_inherent_data_providers: CIDP, epoch_changes: SharedEpochChanges, - can_author_with: CAW, telemetry: Option, genesis_config: SassafrasConfiguration, ) -> Self { @@ -158,20 +154,18 @@ impl select_chain, create_inherent_data_providers, epoch_changes, - can_author_with, telemetry, genesis_config, } } } -impl SassafrasVerifier +impl SassafrasVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + SassafrasApi, SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { async fn check_inherents( @@ -182,16 +176,6 @@ where create_inherent_data_providers: CIDP::InherentDataProviders, execution_context: ExecutionContext, ) -> Result<(), Error> { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "sassafras", - "🌳 Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - let inherent_res = self .client .runtime_api() @@ -258,8 +242,8 @@ type BlockVerificationResult = Result<(BlockImportParams, Option)>>), String>; #[async_trait::async_trait] -impl Verifier - for SassafrasVerifier +impl Verifier + for SassafrasVerifier where Block: BlockT, Client: HeaderMetadata @@ -270,7 +254,6 @@ where + AuxStore, Client::Api: BlockBuilderApi + SassafrasApi, SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith + Send + Sync, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -404,9 +387,9 @@ where block.header = pre_header; block.post_digests.push(verified_info.seal); - block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box<_>, + block.insert_intermediate( + INTERMEDIATE_KEY, + SassafrasIntermediate:: { epoch_descriptor }, ); block.post_hash = Some(hash); diff --git 
a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index fc0c1940cc50d..7955345f8daef 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -28,9 +28,9 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] +hex-literal = "0.3.4" sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } -hex-literal = "0.3" [features] default = ["std"] diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 9a247cc1d1496..df36f60ede33d 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -46,15 +46,15 @@ type DummyValidatorId = u64; type AccountData = u128; impl frame_system::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; @@ -81,10 +81,10 @@ impl pallet_timestamp::Config for Test { impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; - type Extrinsic = TestXt; + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; } impl pallet_sassafras::Config for Test { @@ -178,20 +178,20 @@ fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { } pub fn make_pre_digest( - authority_index: AuthorityIndex, + authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair, ) -> PreDigest { let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); - PreDigest { authority_index, slot, vrf_output, 
vrf_proof, ticket_info: None } + PreDigest { authority_idx, slot, vrf_output, vrf_proof, ticket_aux: None } } pub fn make_wrapped_pre_digest( - authority_index: AuthorityIndex, + authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair, ) -> Digest { - let pre_digest = make_pre_digest(authority_index, slot, pair); + let pre_digest = make_pre_digest(authority_idx, slot, pair); let log = DigestItem::PreRuntime(sp_consensus_sassafras::SASSAFRAS_ENGINE_ID, pre_digest.encode()); Digest { logs: vec![log] } diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 7288ff9805e4a..bd253c0c72f40 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -322,11 +322,11 @@ fn submit_enact_claim_tickets() { .map(|(output, _)| output) .collect(); let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(Origin::none(), tickets0).unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), tickets0).unwrap(); let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(Origin::none(), tickets1).unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), tickets1).unwrap(); let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(Origin::none(), tickets2).unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), tickets2).unwrap(); tickets.sort(); tickets.truncate(max_tickets as usize); @@ -395,8 +395,11 @@ fn block_skips_epochs() { .into_iter() .map(|(output, _)| output) .collect(); - Sassafras::submit_tickets(Origin::none(), BoundedVec::truncate_from(tickets.clone())) - .unwrap(); + Sassafras::submit_tickets( + RuntimeOrigin::none(), + BoundedVec::truncate_from(tickets.clone()), + ) + .unwrap(); // Force enact of next tickets assert_eq!(TicketsMeta::::get().segments_count, 1); From 8f437279332e71a5c450d93568aef0251e5fd453 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 24 Sep 2022 19:40:22 +0200 Subject: [PATCH 07/62] Pick some 
runtime cleanup from Prototype 2.2 --- bin/node-sassafras/node/src/service.rs | 6 +- bin/node-sassafras/runtime/src/lib.rs | 147 ++++++++++--------------- frame/sassafras/src/lib.rs | 61 +++++----- frame/sassafras/src/mock.rs | 3 +- 4 files changed, 95 insertions(+), 122 deletions(-) diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 514d2ff71bd1a..a8cd614882ea7 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -264,7 +264,7 @@ pub fn new_full(mut config: Configuration) -> Result let slot_duration = sassafras_link.genesis_config().slot_duration(); - let sassafras_config = sc_consensus_sassafras::SassafrasParams { + let sassafras_params = sc_consensus_sassafras::SassafrasParams { client: client.clone(), keystore: keystore_container.sync_keystore(), select_chain, @@ -274,7 +274,7 @@ pub fn new_full(mut config: Configuration) -> Result sync_oracle: network.clone(), justification_sync_link: network.clone(), force_authoring, - create_inherent_data_providers: move |_, ()| async move { + create_inherent_data_providers: move |_, _| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); let slot = @@ -286,7 +286,7 @@ pub fn new_full(mut config: Configuration) -> Result }, }; - let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_config)?; + let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_params)?; // the Sassafras authoring task is considered essential, i.e. if it // fails we take down the service with it. diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 7697be0a3f396..e78b280db12da 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -41,21 +41,55 @@ pub type BlockNumber = u32; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. 
pub type Signature = MultiSignature; +/// Index of a transaction in the chain. +pub type Index = u32; + +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; + +/// Block header type as expected by this runtime. +pub type Header = generic::Header; + +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Block type as expected by this runtime. +pub type Block = generic::Block; + /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <::Signer as IdentifyAccount>::AccountId; +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; + /// Balance of an account. pub type Balance = u128; -/// Index of a transaction in the chain. -pub type Index = u32; - -/// A hash of some data used by the chain. -pub type Hash = sp_core::H256; +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; -/// Type used for expressing timestamp. -pub type Moment = u64; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. 
They can then be made to be agnostic over specific formats @@ -65,7 +99,6 @@ pub mod opaque { use super::*; pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - /// Opaque block header type. pub type Header = generic::Header; /// Opaque block type. @@ -81,18 +114,11 @@ impl_opaque_keys! { } } -// To learn more about runtime versioning and what each of the following value means: -// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-sassafras"), impl_name: create_runtime_str!("node-sassafras"), authoring_version: 1, - // The version of the runtime specification. A full node will not attempt to use its native - // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - // `spec_version`, and `authoring_version` are the same between Wasm and native. - // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use - // the compatible custom types. spec_version: 100, impl_version: 1, apis: RUNTIME_API_VERSIONS, @@ -100,32 +126,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -/// This determines the average expected block time that we are targeting. -/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. -/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked -/// up by `pallet_sassafras` to implement `fn slot_duration()`. -/// -/// Change this to adjust the block time. -pub const MILLISECS_PER_BLOCK: u64 = 6000; - -// NOTE: Currently it is not possible to change the slot duration after the chain has started. -// Attempting to do so will brick block production. 
-pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; +/// Sassafras slot duration in milliseconds +pub const SLOT_DURATION_IN_MILLISECONDS: u64 = 3000; -// TODO-SASS-P4: this is an intentional small value used for testing -pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10; - -pub const EPOCH_DURATION_IN_SLOTS: u64 = { - const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; - - (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 -}; - -// Time is measured by number of blocks. -pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); -pub const HOURS: BlockNumber = MINUTES * 60; -pub const DAYS: BlockNumber = HOURS * 24; +/// Sassafras epoch duration in slots. +pub const EPOCH_DURATION_IN_SLOTS: u64 = 10; +/// Max authorities for both Sassafras and Grandpa. pub const MAX_AUTHORITIES: u32 = 32; /// The version information used to identify this runtime when compiled natively. @@ -134,8 +141,8 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - +// Required to send unsigned transactoins from Sassafras pallet +// TODO-SASS-P2 double check (isn't grandpa requiring the same thing??? impl frame_system::offchain::SendTransactionTypes for Runtime where RuntimeCall: From, @@ -144,6 +151,8 @@ where type OverarchingCall = RuntimeCall; } +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; @@ -184,14 +193,9 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -parameter_types! 
{ - pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; - pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; -} - impl pallet_sassafras::Config for Runtime { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; + type SlotDuration = ConstU64; + type EpochDuration = ConstU64; #[cfg(feature = "use-session-pallet")] type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; #[cfg(not(feature = "use-session-pallet"))] @@ -217,7 +221,7 @@ impl pallet_grandpa::Config for Runtime { impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = (); - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type MinimumPeriod = ConstU64<{ SLOT_DURATION_IN_MILLISECONDS / 2 }>; type WeightInfo = (); } @@ -297,43 +301,6 @@ construct_runtime!( } ); -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; - -/// Block header type as expected by this runtime. -pub type Header = generic::Header; - -/// Block type as expected by this runtime. -pub type Block = generic::Block; - -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); - -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; - -/// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - #[cfg(feature = "runtime-benchmarks")] #[macro_use] extern crate frame_benchmarking; @@ -411,8 +378,8 @@ impl_runtime_apis! 
{ impl sp_consensus_sassafras::SassafrasApi for Runtime { fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { sp_consensus_sassafras::SassafrasConfiguration { - slot_duration: Sassafras::slot_duration(), - epoch_duration: EpochDuration::get(), + slot_duration: SLOT_DURATION_IN_MILLISECONDS, + epoch_duration: EPOCH_DURATION_IN_SLOTS, authorities: Sassafras::authorities().to_vec(), randomness: Sassafras::randomness(), threshold_params: Sassafras::config(), @@ -435,9 +402,7 @@ impl_runtime_apis! { SessionKeys::generate(seed) } - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { + fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>> { SessionKeys::decode_into_raw_public_keys(&encoded) } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index c575ef8f33233..d77e34344f835 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -98,21 +98,14 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: pallet_timestamp::Config + SendTransactionTypes> { - /// The amount of time, in slots, that each epoch should last. - /// NOTE: Currently it is not possible to change the epoch duration after the chain has - /// started. Attempting to do so will brick block production. + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// The amount of time, in milliseconds, that each slot should last. #[pallet::constant] - type EpochDuration: Get; + type SlotDuration: Get; - /// The expected average block time at which Sassafras should be creating - /// blocks. Since Sassafras is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). + /// The amount of time, in slots, that each epoch should last. 
#[pallet::constant] - type ExpectedBlockTime: Get; + type EpochDuration: Get; /// Sassafras requires some logic to be triggered on every block to query for whether an /// epoch has ended and to perform the transition to the next epoch. @@ -130,13 +123,11 @@ pub mod pallet { type MaxTickets: Get; } - // TODO-SASS-P2 /// Sassafras runtime errors. #[pallet::error] pub enum Error { /// Submitted configuration is invalid. InvalidConfiguration, - // TODO-SASS P2 ... } /// Current epoch index. @@ -302,6 +293,7 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Submit next epoch tickets. + /// /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remov ethe weight? #[pallet::weight(10_000)] pub fn submit_tickets( @@ -321,16 +313,22 @@ pub mod pallet { Ok(()) } - /// Plan an epoch config change. The epoch config change is recorded and will be enacted on - /// the next call to `enact_session_change`. The config will be activated one epoch after. - /// Multiple calls to this method will replace any existing planned config change that had - /// not been enacted yet. + /// Plan an epoch config change. + /// + /// The epoch config change is recorded and will be enacted on the next call to + /// `enact_session_change`. + /// + /// The config will be activated one epoch after. Multiple calls to this method will + /// replace any existing planned config change that had not been enacted yet. + /// + /// TODO: TODO-SASS-P4: proper weight #[pallet::weight(10_000)] pub fn plan_config_change( origin: OriginFor, config: SassafrasEpochConfiguration, ) -> DispatchResult { ensure_root(origin)?; + ensure!( config.redundancy_factor != 0 && config.attempts_number != 0, Error::::InvalidConfiguration @@ -425,13 +423,13 @@ pub mod pallet { // Inherent methods impl Pallet { - /// Determine the Sassafras slot duration based on the Timestamp module configuration. 
- pub fn slot_duration() -> T::Moment { - // TODO-SASS-P2: clarify why this is doubled (copied verbatim from BABE) - // We double the minimum block-period so each author can always propose within - // the majority of their slot. - ::MinimumPeriod::get().saturating_mul(2u32.into()) - } + // // TODO-SASS-P2: I don't think this is really required + // /// Determine the Sassafras slot duration based on the Timestamp module configuration. + // pub fn slot_duration() -> T::Moment { + // // We double the minimum block-period so each author can always propose within + // // the majority of their slot. + // ::MinimumPeriod::get().saturating_mul(2u32.into()) + // } /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. @@ -728,17 +726,26 @@ impl Pallet { metadata.segments_count = segments_count; } - /// Submit next epoch validator tickets via an unsigned extrinsic. + /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to + /// `submit_unsigned_transaction`. + /// /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic /// is called within the first half of the epoch. That is, tickets received within the /// second half are dropped.
+ /// /// TODO-SASS-P3: we have to add the zk validity proofs pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); tickets.sort_unstable(); let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; - SubmitTransaction::>::submit_unsigned_transaction(call.into()).is_ok() + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(_) => true, + Err(e) => { + log::error!(target: "runtime::sassafras", "Error submitting tickets {:?}", e); + false + }, + } } } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index df36f60ede33d..a8c9ca6e856d7 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -35,6 +35,7 @@ use sp_runtime::{ traits::IdentityLookup, }; +const SLOT_DURATION: u64 = 1000; const EPOCH_DURATION: u64 = 10; const MAX_TICKETS: u32 = 6; @@ -88,8 +89,8 @@ where } impl pallet_sassafras::Config for Test { + type SlotDuration = ConstU64; type EpochDuration = ConstU64; - type ExpectedBlockTime = ConstU64<1>; type EpochChangeTrigger = SameAuthoritiesForever; type MaxAuthorities = ConstU32<10>; type MaxTickets = ConstU32; From 2b516907c70fc8c96aa79917174e736ab2ba80f7 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 29 Oct 2022 17:07:41 +0200 Subject: [PATCH 08/62] Sassafras Prototype 2.2 (#12314) * First experiments with equivocations report * Good enough set of tests for client and pallet code * Better submit-tickets extrinsic tag (hashed) and longevity * Aux data revert implementation * Handle skipped epochs on block-import * Fix in the skipped epochs management code * Insert tickets aux data after block import * Working next epoch tickets incremental sort --- Cargo.lock | 8 + bin/node-sassafras/node/src/command.rs | 7 +- bin/node-sassafras/node/src/service.rs | 3 +- bin/node-sassafras/runtime/src/lib.rs | 15 + client/consensus/babe/src/tests.rs | 15 +- 
client/consensus/epochs/src/lib.rs | 10 +- client/consensus/sassafras/Cargo.toml | 8 + client/consensus/sassafras/src/authorship.rs | 86 +- client/consensus/sassafras/src/aux_schema.rs | 75 +- .../consensus/sassafras/src/block_import.rs | 127 ++- client/consensus/sassafras/src/lib.rs | 25 +- client/consensus/sassafras/src/tests.rs | 972 ++++++++++++++++++ .../consensus/sassafras/src/verification.rs | 66 +- frame/sassafras/src/lib.rs | 184 ++-- frame/sassafras/src/mock.rs | 25 +- frame/sassafras/src/session.rs | 4 +- frame/sassafras/src/tests.rs | 167 ++- primitives/consensus/babe/src/lib.rs | 1 + primitives/consensus/sassafras/src/lib.rs | 57 +- test-utils/runtime/Cargo.toml | 4 + test-utils/runtime/src/lib.rs | 132 ++- test-utils/runtime/src/system.rs | 2 + 22 files changed, 1783 insertions(+), 210 deletions(-) create mode 100644 client/consensus/sassafras/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 1e3aa107a9981..254e5fb4149af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8340,10 +8340,13 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.1", + "sc-block-builder", "sc-client-api", "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", + "sc-keystore", + "sc-network-test", "sc-telemetry", "schnorrkel", "sp-api", @@ -8356,9 +8359,12 @@ dependencies = [ "sp-consensus-vrf", "sp-core", "sp-inherents", + "sp-keyring", "sp-keystore", "sp-runtime", + "sp-timestamp", "substrate-prometheus-endpoint", + "substrate-test-runtime-client", "thiserror", ] @@ -10807,6 +10813,7 @@ dependencies = [ "log", "memory-db", "pallet-babe", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "parity-util-mem", @@ -10821,6 +10828,7 @@ dependencies = [ "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-finality-grandpa", diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index 74ac7dc809802..fad50283d2440 100644 --- 
a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -10,7 +10,7 @@ use sc_service::PartialComponents; impl SubstrateCli for Cli { fn impl_name() -> String { - "Substrate Node".into() + "Sassafras Node".into() } fn impl_version() -> String { @@ -30,7 +30,7 @@ impl SubstrateCli for Cli { } fn copyright_start_year() -> i32 { - 2017 + 2022 } fn load_spec(&self, id: &str) -> Result, String> { @@ -96,7 +96,8 @@ pub fn run() -> sc_cli::Result<()> { runner.async_run(|config| { let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?; - let aux_revert = Box::new(|client, _, blocks| { + let aux_revert = Box::new(|client, backend, blocks| { + sc_consensus_sassafras::revert(backend, blocks)?; sc_finality_grandpa::revert(client, blocks)?; Ok(()) }); diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index a8cd614882ea7..1f7beb20f3609 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -264,7 +264,7 @@ pub fn new_full(mut config: Configuration) -> Result let slot_duration = sassafras_link.genesis_config().slot_duration(); - let sassafras_params = sc_consensus_sassafras::SassafrasParams { + let sassafras_params = sc_consensus_sassafras::SassafrasWorkerParams { client: client.clone(), keystore: keystore_container.sync_keystore(), select_chain, @@ -303,7 +303,6 @@ pub fn new_full(mut config: Configuration) -> Result if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), justification_period: 512, name: Some(name), diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index e78b280db12da..f0ef011b14d39 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -395,6 
+395,21 @@ impl_runtime_apis! { fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { Sassafras::slot_ticket(slot) } + + fn generate_key_ownership_proof( + _slot: sp_consensus_sassafras::Slot, + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + //let key_owner_proof = key_owner_proof.decode()?; + Sassafras::submit_unsigned_equivocation_report(equivocation_proof) + } } impl sp_session::SessionKeys for Runtime { diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 58f5e7b8eb6d4..909a8604c138c 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -36,7 +36,6 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; use sp_runtime::{ @@ -68,8 +67,6 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; -const SLOT_DURATION_MS: u64 = 1000; - #[derive(Clone)] struct DummyFactory { client: Arc, @@ -318,14 +315,15 @@ impl TestNetFactory for BabeTestNet { let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let slot_duration = data.link.config.slot_duration(); TestVerifier { inner: BabeVerifier { client: client.clone(), select_chain: longest_chain, - create_inherent_data_providers: Box::new(|_, _| async { + create_inherent_data_providers: Box::new(move |_, _| async move { let slot = InherentDataProvider::from_timestamp_and_slot_duration( Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + slot_duration, ); Ok((slot,)) }), @@ -425,6 +423,7 @@ fn run_one_test(mutator: 
impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static .for_each(|_| future::ready(())), ); + let slot_duration = data.link.config.slot_duration(); babe_futures.push( start_babe(BabeParams { block_import: data.block_import.lock().take().expect("import set up during init"), @@ -432,10 +431,10 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static client, env: environ, sync_oracle: DummyOracle, - create_inherent_data_providers: Box::new(|_, _| async { + create_inherent_data_providers: Box::new(move |_, _| async move { let slot = InherentDataProvider::from_timestamp_and_slot_duration( Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + slot_duration, ); Ok((slot,)) }), @@ -1004,7 +1003,7 @@ fn obsolete_blocks_aux_data_cleanup() { let data = peer.data.as_ref().expect("babe link set up during initialization"); let client = peer.client().as_client(); - // Register the handler (as done by `babe_start`) + // Register the handler (as done by Babe's `block_import` method) let client_clone = client.clone(); let on_finality = move |summary: &FinalityNotification| { aux_storage_cleanup(client_clone.as_ref(), summary) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 2e0186495db5e..994f3789f4515 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -777,11 +777,6 @@ where } } - /// Return the inner fork tree. - pub fn tree(&self) -> &ForkTree> { - &self.inner - } - /// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and /// `hash`. pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) { @@ -832,6 +827,11 @@ where self.epochs.remove(&(h, n)); }); } + + /// Return the inner fork tree (mostly useful for testing) + pub fn tree(&self) -> &ForkTree> { + &self.inner + } } /// Type alias to produce the epoch-changes tree from a block type. 
diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 888959090b31a..a6c6bb59984f1 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -40,3 +40,11 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } + +[dev-dependencies] +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } \ No newline at end of file diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 92e65666918e5..1f8f7b3be3787 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -40,13 +40,18 @@ pub(crate) fn secondary_authority_index( /// Try to claim an epoch slot. /// If ticket is `None`, then the slot should be claimed using the fallback mechanism. 
-fn claim_slot( +pub(crate) fn claim_slot( slot: Slot, epoch: &Epoch, ticket: Option, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { let config = &epoch.config; + + if config.authorities.is_empty() { + return None + } + let (authority_idx, ticket_aux) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); @@ -63,7 +68,7 @@ fn claim_slot( let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; - let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_index); + let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_idx); let signature = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, @@ -77,7 +82,7 @@ fn claim_slot( authority_idx, slot, vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof.clone()), + vrf_proof: VRFProof(signature.proof), ticket_aux, }; @@ -87,7 +92,7 @@ fn claim_slot( /// Generate the tickets for the given epoch. /// Tickets additional information will be stored within the `Epoch` structure. /// The additional information will be used later during session to claim slots. -pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { +fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; let redundancy_factor = config.threshold_params.redundancy_factor; @@ -111,7 +116,7 @@ pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) let make_ticket = |attempt| { let transcript_data = - make_ticket_transcript_data(&config.randomness, attempt, epoch.epoch_index); + make_ticket_transcript_data(&config.randomness, attempt, epoch.epoch_idx); // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. 
@@ -144,7 +149,7 @@ pub fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) tickets } -struct SassafrasSlotWorker { +struct SlotWorker { client: Arc, block_import: I, env: E, @@ -159,7 +164,7 @@ struct SassafrasSlotWorker { #[async_trait::async_trait] impl sc_consensus_slots::SimpleSlotWorker - for SassafrasSlotWorker + for SlotWorker where B: BlockT, C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, @@ -315,7 +320,7 @@ where } fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { - // TODO-SASS-P2 + // TODO-SASS-P3 false } @@ -336,7 +341,7 @@ where } fn telemetry(&self) -> Option { - // TODO-SASS-P2 + // TODO-SASS-P4 None } @@ -357,7 +362,14 @@ where } } -async fn tickets_worker( +/// Authoring tickets generation worker. +/// +/// Listens on the client's import notification stream for blocks which contains new epoch +/// information, that is blocks that signals the begin of a new epoch. +/// This event here triggers the begin of the generation of tickets for the next epoch. +/// The tickets generated by the worker are saved within the epoch changes tree +/// and are volatile. 
+async fn start_tickets_worker( client: Arc, keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, @@ -369,6 +381,7 @@ async fn tickets_worker( SC: SelectChain + 'static, { let mut notifications = client.import_notification_stream(); + while let Some(notification) = notifications.next().await { let epoch_desc = match find_next_epoch_digest::(¬ification.header) { Ok(Some(epoch_desc)) => epoch_desc, @@ -379,7 +392,7 @@ async fn tickets_worker( _ => continue, }; - debug!(target: "sassafras", "🌳 New epoch annouced {:x?}", epoch_desc); + debug!(target: "sassafras", "🌳 New epoch announced {:x?}", epoch_desc); let number = *notification.header.number(); let position = if number == One::one() { @@ -389,17 +402,20 @@ async fn tickets_worker( }; let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; - let tickets = epoch_changes - .shared_data() - .epoch_mut(&epoch_identifier) - .map(|epoch| authorship::generate_epoch_tickets(epoch, &keystore)) - .unwrap_or_default(); + let mut epoch = match epoch_changes.shared_data().epoch(&epoch_identifier).cloned() { + Some(epoch) => epoch, + None => { + warn!(target: "🌳 sassafras", "Unexpected missing epoch data for {:?}", epoch_identifier); + continue + }, + }; + let tickets = generate_epoch_tickets(&mut epoch, &keystore); if tickets.is_empty() { continue } - // Get the best block on which we will build and send the tickets. + // Get the best block on which we will publish the tickets. let best_id = match select_chain.best_chain().await { Ok(header) => BlockId::Hash(header.hash()), Err(err) => { @@ -413,13 +429,20 @@ async fn tickets_worker( Ok(false) => Some("Unknown reason".to_string()), _ => None, }; - if let Some(err) = err { - error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); - // Remove tickets from epoch tree node. 
- epoch_changes - .shared_data() - .epoch_mut(&epoch_identifier) - .map(|epoch| epoch.tickets_aux.clear()); + + match err { + None => { + // Cache tickets in the epoch changes tree + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|target_epoch| target_epoch.tickets_aux = epoch.tickets_aux); + // TODO-SASS-P4: currently we don't persist the tickets proofs + // Thus on reboot/crash we are losing them. + }, + Some(err) => { + error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); + }, } } } @@ -458,7 +481,7 @@ type SlotNotificationSinks = Arc< >; /// Parameters for Sassafras. -pub struct SassafrasParams { +pub struct SassafrasWorkerParams { /// The client to use pub client: Arc, /// The keystore that manages the keys of the node. @@ -477,15 +500,15 @@ pub struct SassafrasParams { pub justification_sync_link: L, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, - /// Force authoring of blocks even if we are offline + /// Force authoring of blocks even if we are offline. pub force_authoring: bool, - /// The source of timestamps for relative slots + /// State shared between import queue and authoring worker. pub sassafras_link: SassafrasLink, } /// Start the Sassafras worker.
pub fn start_sassafras( - SassafrasParams { + SassafrasWorkerParams { client, keystore, select_chain, @@ -496,14 +519,13 @@ pub fn start_sassafras( create_inherent_data_providers, force_authoring, sassafras_link, - }: SassafrasParams, + }: SassafrasWorkerParams, ) -> Result, sp_consensus::Error> where B: BlockT, C: ProvideRuntimeApi + ProvideUncles + BlockchainEvents - + PreCommitActions + HeaderBackend + HeaderMetadata + Send @@ -527,7 +549,7 @@ where let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); - let slot_worker = SassafrasSlotWorker { + let slot_worker = SlotWorker { client: client.clone(), block_import, env, @@ -548,7 +570,7 @@ where create_inherent_data_providers, ); - let tickets_worker = tickets_worker( + let tickets_worker = start_tickets_worker( client.clone(), keystore, sassafras_link.epoch_changes.clone(), diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 07f723341b069..8c891ea0630f3 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -17,14 +17,20 @@ // along with this program. If not, see . //! Schema for auxiliary data persistence. +//! +//! 
TODO-SASS-P2 : RENAME FROM aux_schema.rs => aux_data.rs + +use std::{collections::HashSet, sync::Arc}; use scale_codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; + +use sc_client_api::{blockchain::Backend as _, Backend as BackendT}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus_sassafras::SassafrasBlockWeight; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, NumberFor, SaturatedConversion, Zero}; use crate::Epoch; @@ -99,3 +105,68 @@ pub fn load_block_weight( ) -> ClientResult> { load_decode(backend, block_weight_key(block_hash).as_slice()) } + +/// Reverts protocol aux data from the best block to at most the last finalized block. +/// +/// Epoch-changes and block weights announced after the revert point are removed. +pub fn revert(backend: Arc, blocks: NumberFor) -> ClientResult<()> +where + Block: BlockT, + Backend: BackendT, +{ + let blockchain = backend.blockchain(); + let best_number = blockchain.info().best_number; + let finalized = blockchain.info().finalized_number; + + let revertible = blocks.min(best_number - finalized); + if revertible == Zero::zero() { + return Ok(()) + } + + let revert_up_to_number = best_number - revertible; + let revert_up_to_hash = blockchain.hash(revert_up_to_number)?.ok_or(ClientError::Backend( + format!("Unexpected hash lookup failure for block number: {}", revert_up_to_number), + ))?; + + // Revert epoch changes tree. + + // This config is only used on-genesis. + let epoch_changes = load_epoch_changes::(&*backend)?; + let mut epoch_changes = epoch_changes.shared_data(); + + if revert_up_to_number == Zero::zero() { + // Special case, no epoch changes data were present on genesis. 
+ *epoch_changes = EpochChangesFor::::new(); + } else { + let descendent_query = sc_consensus_epochs::descendent_query(blockchain); + epoch_changes.revert(descendent_query, revert_up_to_hash, revert_up_to_number); + } + + // Remove block weights added after the revert point. + + let mut weight_keys = HashSet::with_capacity(revertible.saturated_into()); + + let leaves = backend.blockchain().leaves()?.into_iter().filter(|&leaf| { + sp_blockchain::tree_route(blockchain, revert_up_to_hash, leaf) + .map(|route| route.retracted().is_empty()) + .unwrap_or_default() + }); + + for mut hash in leaves { + loop { + let meta = blockchain.header_metadata(hash)?; + if meta.number <= revert_up_to_number || !weight_keys.insert(block_weight_key(hash)) { + // We've reached the revert point or an already processed branch, stop here. + break + } + hash = meta.parent; + } + } + + let weight_keys: Vec<_> = weight_keys.iter().map(|val| val.as_slice()).collect(); + + // Write epoch changes and remove weights in one shot. + write_epoch_changes::(&epoch_changes, |values| { + AuxStore::insert_aux(&*backend, values, weight_keys.iter()) + }) +} diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 555eac3f62638..01e804ecf3ea1 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -19,6 +19,7 @@ //! Types and functions related to block import. use super::*; +use sc_client_api::{AuxDataOperations, FinalityNotification, PreCommitActions}; /// A block-import handler for Sassafras. 
/// @@ -45,7 +46,26 @@ impl Clone for SassafrasBlockImport SassafrasBlockImport { +fn aux_storage_cleanup( + _client: &C, + _notification: &FinalityNotification, +) -> AuxDataOperations +where + B: BlockT, + C: HeaderMetadata + HeaderBackend, +{ + // TODO-SASS-P3 + Default::default() +} + +impl SassafrasBlockImport +where + C: AuxStore + + HeaderBackend + + HeaderMetadata + + PreCommitActions + + 'static, +{ /// Constructor. pub fn new( inner: I, @@ -53,6 +73,16 @@ impl SassafrasBlockImport { epoch_changes: SharedEpochChanges, genesis_config: SassafrasConfiguration, ) -> Self { + let client_weak = Arc::downgrade(&client); + let on_finality = move |notification: &FinalityNotification| { + if let Some(client) = client_weak.upgrade() { + aux_storage_cleanup(client.as_ref(), notification) + } else { + Default::default() + } + }; + client.register_finality_action(Box::new(on_finality)); + SassafrasBlockImport { inner, client, epoch_changes, genesis_config } } } @@ -82,9 +112,8 @@ where let hash = block.post_hash(); let number = *block.header.number(); - let pre_digest = find_pre_digest::(&block.header).expect( - "valid sassafras headers must contain a predigest; header has been already verified; qed", - ); + let pre_digest = find_pre_digest::(&block.header) + .expect("valid headers contain a pre-digest; header has been already verified; qed"); let slot = pre_digest.slot; let parent_hash = *block.header.parent_hash(); @@ -98,10 +127,9 @@ where ) })?; - let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot).expect( - "parent is non-genesis; valid Sassafras headers contain a pre-digest; \ - header has already been verified; qed", - ); + let parent_slot = find_pre_digest::(&parent_header) + .map(|d| d.slot) + .expect("parent is non-genesis; valid headers contain a pre-digest; header has been already verified; qed"); // Make sure that slot number is strictly increasing if slot <= parent_slot { @@ -161,30 +189,57 @@ where _ => (), } - let info = 
self.client.info(); - if let Some(next_epoch_descriptor) = next_epoch_digest { old_epoch_changes = Some((*epoch_changes).clone()); - let viable_epoch = epoch_changes + let mut viable_epoch = epoch_changes .viable_epoch(&epoch_descriptor, |slot| { Epoch::genesis(&self.genesis_config, slot) }) .ok_or_else(|| { ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; + })? + .into_cloned(); + + if viable_epoch.as_ref().end_slot() <= slot { + // Some epochs must have been skipped as our current slot fits outside the + // current epoch. We will figure out which is the first skipped epoch and we + // will partially re-use its data for this "recovery" epoch. + let epoch_data = viable_epoch.as_mut(); + let skipped_epochs = + (*slot - *epoch_data.start_slot) / epoch_data.config.epoch_duration; + let original_epoch_idx = epoch_data.epoch_idx; + + // NOTE: notice that we are only updating a local copy of the `Epoch`, this + // makes it so that when we insert the next epoch into `EpochChanges` below + // (after incrementing it), it will use the correct epoch index and start slot. + // We do not update the original epoch that may be reused because there may be + // some other forks where the epoch isn't skipped. + // Not updating the original epoch works because when we search the tree for + // which epoch to use for a given slot, we will search in-depth with the + // predicate `epoch.start_slot <= slot` which will still match correctly without + // requiring to update `start_slot` to the correct value. 
+ epoch_data.epoch_idx += skipped_epochs; + epoch_data.start_slot = Slot::from( + *epoch_data.start_slot + skipped_epochs * epoch_data.config.epoch_duration, + ); + log::warn!( + target: "sassafras", + "🌳 Epoch(s) skipped from {} to {}", + original_epoch_idx, epoch_data.epoch_idx + ); + } - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info + // Restrict info logging during initial sync to avoid spam + let log_level = match block.origin { + BlockOrigin::NetworkInitialSync => log::Level::Debug, + _ => log::Level::Info, }; log!(target: "sassafras", log_level, "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, + viable_epoch.as_ref().epoch_idx, hash, slot, viable_epoch.as_ref().start_slot, @@ -246,18 +301,16 @@ where .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) }); - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. + // The fork choice rule is that we pick the heaviest chain (i.e. more blocks built + // using primary mechanism), if there's a tie we go with the longest chain. block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { + let info = self.client.info(); + let best_weight = if &info.best_hash == block.header.parent_hash() { // the parent=genesis case is already covered for loading parent weight, // so we don't need to cover again here. parent_weight } else { - aux_schema::load_block_weight(&*self.client, last_best) + aux_schema::load_block_weight(&*self.client, &info.best_hash) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? .ok_or_else(|| { ConsensusError::ChainLookup( @@ -266,13 +319,9 @@ where })? 
}; - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) + let is_new_best = total_weight > best_weight || + (total_weight == best_weight && number > info.best_number); + Some(ForkChoiceStrategy::Custom(is_new_best)) }; // Release the mutex, but it stays locked epoch_changes.release_mutex() @@ -317,12 +366,10 @@ where let finalized_header = client .header(BlockId::Hash(info.finalized_hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect( - "best finalized hash was given by client; finalized headers must exist in db; qed", - ); + .expect("finalized headers must exist in db; qed"); find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .expect("valid blocks have a pre-digest; qed") .slot }; @@ -342,14 +389,18 @@ where /// an import-queue. /// /// Also returns a link object used to correctly instantiate the import queue -/// and background worker. +/// and authoring worker. pub fn block_import( genesis_config: SassafrasConfiguration, inner_block_import: I, client: Arc, ) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> where - C: AuxStore + HeaderBackend + HeaderMetadata + 'static, + C: AuxStore + + HeaderBackend + + HeaderMetadata + + PreCommitActions + + 'static, { let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index d2ad9f274c9b0..f5134e38266f7 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -20,7 +20,8 @@ //! //! 
TODO-SASS-P2: documentation -#![deny(warnings)] +// TODO-SASS-P2: remove this +//#![deny(warnings)] #![forbid(unsafe_code, missing_docs)] use std::{ @@ -41,9 +42,7 @@ use prometheus_endpoint::Registry; use scale_codec::{Decode, Encode}; use schnorrkel::SignatureError; -use sc_client_api::{ - backend::AuxStore, BlockchainEvents, PreCommitActions, ProvideUncles, UsageProvider, -}; +use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; use sc_consensus::{ block_import::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, @@ -56,9 +55,7 @@ use sc_consensus_epochs::{ descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, SharedEpochChanges, ViableEpochDescriptor, }; -use sc_consensus_slots::{ - check_equivocation, CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges, -}; +use sc_consensus_slots::{CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::AppKey; @@ -91,9 +88,13 @@ pub use sp_consensus_sassafras::{ mod authorship; mod aux_schema; mod block_import; +#[cfg(test)] +mod tests; mod verification; -pub use authorship::{start_sassafras, SassafrasParams, SassafrasWorker}; +// Export core components. +pub use authorship::{start_sassafras, SassafrasWorker, SassafrasWorkerParams}; +pub use aux_schema::revert; pub use block_import::{block_import, SassafrasBlockImport}; pub use verification::SassafrasVerifier; @@ -187,7 +188,7 @@ fn sassafras_err(error: Error) -> Error { #[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] pub struct Epoch { /// The epoch index. - pub epoch_index: u64, + pub epoch_idx: u64, /// The starting slot of the epoch. 
pub start_slot: Slot, /// Epoch configuration @@ -209,7 +210,7 @@ impl EpochT for Epoch { threshold_params: descriptor.config.unwrap_or(self.config.threshold_params.clone()), }; Epoch { - epoch_index: self.epoch_index + 1, + epoch_idx: self.epoch_idx + 1, start_slot: self.start_slot + config.epoch_duration, config, tickets_aux: BTreeMap::new(), @@ -221,7 +222,7 @@ impl EpochT for Epoch { } fn end_slot(&self) -> Slot { - self.start_slot + self.config.slot_duration + self.start_slot + self.config.epoch_duration } } @@ -230,7 +231,7 @@ impl Epoch { /// the first block, so that has to be provided. pub fn genesis(config: &SassafrasConfiguration, slot: Slot) -> Epoch { Epoch { - epoch_index: 0, + epoch_idx: 0, start_slot: slot, config: config.clone(), tickets_aux: BTreeMap::new(), diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs new file mode 100644 index 0000000000000..5cf0bef6795e8 --- /dev/null +++ b/client/consensus/sassafras/src/tests.rs @@ -0,0 +1,972 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Sassafras client tests + +// TODO-SASS-P2 +// Missing interesting tests: +// - verify block claimed via primary method + +use super::*; + +use futures::executor::block_on; +use std::sync::Arc; + +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::Finalizer; +use sc_consensus::{BlockImport, BoxJustificationImport}; +use sc_network_test::*; +use sp_application_crypto::key_types::SASSAFRAS; +use sp_blockchain::Error as TestError; +use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus_sassafras::{inherents::InherentDataProvider, vrf::make_slot_transcript_data}; +use sp_keyring::Sr25519Keyring; +use sp_keystore::testing::KeyStore as TestKeyStore; +use sp_runtime::{Digest, DigestItem}; +use sp_timestamp::Timestamp; + +use substrate_test_runtime_client::{runtime::Block as TestBlock, Backend as TestBackend}; + +// Monomorphization of generic structures for the test context. + +type BlockId = crate::BlockId; + +type TestHeader = ::Header; + +type TestClient = substrate_test_runtime_client::client::Client< + TestBackend, + substrate_test_runtime_client::ExecutorDispatch, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, +>; + +type TestSelectChain = + substrate_test_runtime_client::LongestChain; + +type TestTransaction = + sc_client_api::TransactionFor; + +type TestBlockImportParams = BlockImportParams; + +type TestViableEpochDescriptor = sc_consensus_epochs::ViableEpochDescriptor; + +// Monomorphization of Sassafras structures for the test context. 
+ +type SassafrasIntermediate = crate::SassafrasIntermediate; + +type SassafrasBlockImport = crate::SassafrasBlockImport>; + +type SassafrasVerifier = crate::SassafrasVerifier< + TestBlock, + PeersFullClient, + TestSelectChain, + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (InherentDataProvider,), + >, + >, +>; + +type SassafrasLink = crate::SassafrasLink; + +// Epoch duration is slots +const EPOCH_DURATION: u64 = 6; +// Slot duration is milliseconds +const SLOT_DURATION: u64 = 1000; + +struct TestProposer { + client: Arc, + link: SassafrasLink, + parent_hash: Hash, + parent_number: u64, + parent_slot: Slot, +} + +impl TestProposer { + fn propose_block(self, digest: Digest) -> TestBlock { + block_on(self.propose(InherentData::default(), digest, Duration::default(), None)) + .expect("Proposing block") + .block + } +} + +impl Proposer for TestProposer { + type Error = TestError; + type Transaction = TestTransaction; + type Proposal = future::Ready, Self::Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); + + fn propose( + self, + _: InherentData, + inherent_digests: Digest, + _: Duration, + _: Option, + ) -> Self::Proposal { + let block_builder = self + .client + .new_block_at(&BlockId::Hash(self.parent_hash), inherent_digests, false) + .unwrap(); + + let mut block = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => b.block, + Err(e) => return future::ready(Err(e)), + }; + + // Currently the test runtime doesn't invoke each pallet Hooks such as `on_initialize` and + // `on_finalize`. Thus we have to manually figure out if we should add a consensus digest. 
+ + let this_slot = crate::find_pre_digest::(block.header()) + .expect("baked block has valid pre-digest") + .slot; + + let epoch_changes = self.link.epoch_changes.shared_data(); + let epoch = epoch_changes + .epoch_data_for_child_of( + descendent_query(&*self.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| Epoch::genesis(&self.link.genesis_config, slot), + ) + .expect("client has data to find epoch") + .expect("can compute epoch for baked block"); + + let first_in_epoch = self.parent_slot < epoch.start_slot; + if first_in_epoch { + // push a `Consensus` digest signalling next change. + // we just reuse the same randomness and authorities as the prior + // epoch. this will break when we add light client support, since + // that will re-check the randomness logic off-chain. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: epoch.config.authorities.clone(), + randomness: epoch.config.randomness, + config: None, + }) + .encode(); + let digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, digest_data); + block.header.digest_mut().push(digest) + } + + future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) + } +} + +struct TestContext { + client: Arc, + backend: Arc, + link: SassafrasLink, + block_import: SassafrasBlockImport, + verifier: SassafrasVerifier, +} + +fn create_test_verifier( + client: Arc, + link: &SassafrasLink, + config: SassafrasConfiguration, +) -> SassafrasVerifier { + let slot_duration = config.slot_duration(); + + let create_inherent_data_providers = Box::new(move |_, _| async move { + let slot = InherentDataProvider::from_timestamp_and_slot_duration( + Timestamp::current(), + slot_duration, + ); + Ok((slot,)) + }); + + let (_, longest_chain) = TestClientBuilder::with_default_backend().build_with_longest_chain(); + + SassafrasVerifier::new( + client.clone(), + longest_chain, + create_inherent_data_providers, + link.epoch_changes.clone(), + None, + config, + ) +} 
+ +fn create_test_block_import( + client: Arc, + config: SassafrasConfiguration, +) -> (SassafrasBlockImport, SassafrasLink) { + crate::block_import(config, client.clone(), client.clone()) + .expect("can initialize block-import") +} + +impl TestContext { + fn new() -> Self { + let (client, backend) = TestClientBuilder::with_default_backend().build_with_backend(); + let client = Arc::new(client); + + // Note: configuration is loaded using the `TestClient` instance as the runtime-api + // provider. In practice this will use the values defined within the test runtime + // defined in the `substrate_test_runtime` crate. + let config = crate::configuration(&*client).expect("config available"); + + let (block_import, link) = create_test_block_import(client.clone(), config.clone()); + + let verifier = create_test_verifier(client.clone(), &link, config.clone()); + + Self { client, backend, link, block_import, verifier } + } + + // This is a bit hacky solution to use `TestContext` as an `Environment` implementation + fn new_with_pre_built_data( + client: Arc, + backend: Arc, + link: SassafrasLink, + block_import: SassafrasBlockImport, + ) -> Self { + let verifier = create_test_verifier(client.clone(), &link, link.genesis_config.clone()); + Self { client, backend, link, block_import, verifier } + } + + fn import_block(&mut self, mut params: TestBlockImportParams) -> Hash { + let post_hash = params.post_hash(); + + if params.post_digests.is_empty() { + // Assume that the seal has not been removed yet. Remove it here... + // NOTE: digest may be empty because of some test intentionally clearing up + // the whole digest logs. 
+ if let Some(seal) = params.header.digest_mut().pop() { + params.post_digests.push(seal); + } + } + + match block_on(self.block_import.import_block(params, Default::default())).unwrap() { + ImportResult::Imported(_) => (), + _ => panic!("expected block to be imported"), + } + + post_hash + } + + fn verify_block(&mut self, params: TestBlockImportParams) -> TestBlockImportParams { + let tmp_params = params.clear_storage_changes_and_mutate(); + let (tmp_params, _) = block_on(self.verifier.verify(tmp_params)).unwrap(); + tmp_params.clear_storage_changes_and_mutate() + } + + fn epoch_data(&self, parent_hash: &Hash, parent_number: u64, slot: Slot) -> Epoch { + self.link + .epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + |slot| Epoch::genesis(&self.link.genesis_config, slot), + ) + .unwrap() + .unwrap() + } + + fn epoch_descriptor( + &self, + parent_hash: &Hash, + parent_number: u64, + slot: Slot, + ) -> TestViableEpochDescriptor { + self.link + .epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + ) + .unwrap() + .unwrap() + } + + // Propose a block + fn propose_block(&mut self, parent_id: BlockId, slot: Option) -> TestBlockImportParams { + let parent = self.client.header(&parent_id).unwrap().unwrap(); + let parent_hash = parent.hash(); + let parent_number = *parent.number(); + + let authority = Sr25519Keyring::Alice; + let keystore = create_keystore(authority); + + let proposer = block_on(self.init(&parent)).unwrap(); + + let slot = slot.unwrap_or_else(|| { + let parent_pre_digest = find_pre_digest::(&parent).unwrap(); + parent_pre_digest.slot + 1 + }); + + let epoch = self.epoch_data(&parent_hash, parent_number, slot); + let transcript_data = + make_slot_transcript_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let signature = SyncCryptoStore::sr25519_vrf_sign( + 
&*keystore, + SASSAFRAS, + &authority.public(), + transcript_data, + ) + .unwrap() + .unwrap(); + + let pre_digest = PreDigest { + slot, + authority_idx: 0, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof), + ticket_aux: None, + }; + let digest = sp_runtime::generic::Digest { + logs: vec![DigestItem::sassafras_pre_digest(pre_digest)], + }; + + let mut block = proposer.propose_block(digest); + + let epoch_descriptor = self.epoch_descriptor(&parent_hash, parent_number, slot); + + // Sign the pre-sealed hash of the block and then add it to a digest item. + let hash = block.header.hash(); + let public_type_pair = authority.public().into(); + let signature = + SyncCryptoStore::sign_with(&*keystore, SASSAFRAS, &public_type_pair, hash.as_ref()) + .unwrap() + .unwrap() + .try_into() + .unwrap(); + let seal = DigestItem::sassafras_seal(signature); + block.header.digest_mut().push(seal); + + let mut params = BlockImportParams::new(BlockOrigin::Own, block.header); + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.body = Some(block.extrinsics); + params.insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate { epoch_descriptor }); + + params + } + + // Propose and import a new block on top of the given parent. + // This skips verification. + fn propose_and_import_block(&mut self, parent_id: BlockId, slot: Option) -> Hash { + let params = self.propose_block(parent_id, slot); + self.import_block(params) + } + + // Propose and import n valid blocks that are built on top of the given parent. + // The proposer takes care of producing epoch change digests according to the epoch + // duration (which is set by the test runtime). 
+ fn propose_and_import_blocks(&mut self, mut parent_id: BlockId, n: usize) -> Vec { + let mut hashes = Vec::with_capacity(n); + + for _ in 0..n { + let hash = self.propose_and_import_block(parent_id, None); + hashes.push(hash); + parent_id = BlockId::Hash(hash); + } + + hashes + } +} + +fn create_keystore(authority: Sr25519Keyring) -> SyncCryptoStorePtr { + let keystore = Arc::new(TestKeyStore::new()); + SyncCryptoStore::sr25519_generate_new(&*keystore, SASSAFRAS, Some(&authority.to_seed())) + .expect("Creates authority key"); + keystore +} + +#[test] +fn tests_assumptions_sanity_check() { + let env = TestContext::new(); + let config = env.link.genesis_config; + + // Check that genesis configuration read from test runtime has the expected values + assert_eq!( + config.authorities, + vec![ + (Sr25519Keyring::Alice.public().into(), 1), + (Sr25519Keyring::Bob.public().into(), 1), + (Sr25519Keyring::Charlie.public().into(), 1), + ] + ); + assert_eq!(config.epoch_duration, EPOCH_DURATION); + assert_eq!(config.slot_duration, SLOT_DURATION); + assert_eq!(config.randomness, [0; 32]); + // TODO-SASS-P3: check threshold params +} + +#[test] +fn claim_secondary_slots_works() { + let env = TestContext::new(); + let mut config = env.link.genesis_config.clone(); + config.randomness = [2; 32]; + + let authorities = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; + + let epoch = Epoch { + epoch_idx: 1, + start_slot: 6.into(), + config: config.clone(), + tickets_aux: Default::default(), + }; + + let mut assignments = vec![usize::MAX; config.epoch_duration as usize]; + + for (auth_idx, auth_id) in authorities.iter().enumerate() { + let keystore = create_keystore(*auth_id); + + for slot in 0..config.epoch_duration { + if let Some((claim, auth_id2)) = + authorship::claim_slot(slot.into(), &epoch, None, &keystore) + { + assert_eq!(claim.authority_idx as usize, auth_idx); + assert_eq!(claim.slot, Slot::from(slot)); + assert_eq!(claim.ticket_aux, None); + 
assert_eq!(auth_id.public(), auth_id2.into()); + + // Check that this slot has not been assigned before + assert_eq!(assignments[slot as usize], usize::MAX); + assignments[slot as usize] = auth_idx; + } + } + } + // Check that every slot has been assigned + assert!(assignments.iter().all(|v| *v != usize::MAX)); + println!("secondary slots assignments: {:?}", assignments); +} + +#[test] +fn claim_primary_slots_works() { + // Here the test is very deterministic. + // If a node has in its epoch `tickets_aux` the information corresponding to the + // ticket that is presented. Then the claim ticket should just return the + // ticket auxiliary information. + let env = TestContext::new(); + let mut config = env.link.genesis_config.clone(); + config.randomness = [2; 32]; + + let mut epoch = Epoch { + epoch_idx: 1, + start_slot: 6.into(), + config: config.clone(), + tickets_aux: Default::default(), + }; + + let keystore = create_keystore(Sr25519Keyring::Alice); + + // Success if we have ticket data and the key in our keystore + + let authority_idx = 0u32; + let ticket: Ticket = [0u8; 32].try_into().unwrap(); + let ticket_proof: VRFProof = [0u8; 64].try_into().unwrap(); + let ticket_aux = TicketAux { attempt: 0, proof: ticket_proof }; + epoch.tickets_aux.insert(ticket, (authority_idx, ticket_aux)); + + let (pre_digest, auth_id) = + authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore).unwrap(); + + assert_eq!(pre_digest.authority_idx, authority_idx); + assert_eq!(auth_id, Sr25519Keyring::Alice.public().into()); + + // Fail if we don't have aux data for some ticket + + let ticket: Ticket = [1u8; 32].try_into().unwrap(); + let claim = authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore); + assert!(claim.is_none()); + + // Fail if we don't have the key for the ticket owner in our keystore + // (even though we have associated data, it doesn't matter) + + let authority_idx = 1u32; + let ticket_proof: VRFProof = [0u8; 64].try_into().unwrap(); + let 
ticket_aux = TicketAux { attempt: 0, proof: ticket_proof }; + epoch.tickets_aux.insert(ticket, (authority_idx, ticket_aux)); + let claim = authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore); + assert!(claim.is_none()); +} + +#[test] +#[should_panic(expected = "valid headers contain a pre-digest")] +fn import_rejects_block_without_pre_digest() { + let mut env = TestContext::new(); + + let mut import_params = env.propose_block(BlockId::Number(0), Some(999.into())); + // Remove logs from the header + import_params.header.digest_mut().logs.clear(); + + env.import_block(import_params); +} + +#[test] +#[should_panic(expected = "Unexpected epoch change")] +fn import_rejects_block_with_unexpected_epoch_changes() { + let mut env = TestContext::new(); + + env.propose_and_import_block(BlockId::Number(0), None); + + let mut import_params = env.propose_block(BlockId::Number(1), None); + // Insert an epoch change announcement when it is not required. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: env.link.genesis_config.authorities.clone(), + randomness: env.link.genesis_config.randomness, + config: None, + }) + .encode(); + let digest_item = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, digest_data); + let digest = import_params.header.digest_mut(); + digest.logs.insert(digest.logs.len() - 1, digest_item); + + env.import_block(import_params); +} + +#[test] +#[should_panic(expected = "Expected epoch change to happen")] +fn import_rejects_block_with_missing_epoch_changes() { + let mut env = TestContext::new(); + + let blocks = env.propose_and_import_blocks(BlockId::Number(0), EPOCH_DURATION as usize); + + let mut import_params = + env.propose_block(BlockId::Hash(blocks[EPOCH_DURATION as usize - 1]), None); + + let digest = import_params.header.digest_mut(); + // Remove the epoch change announcement. 
+	// (Implementation detail: should be the second to last entry, just before the seal)
+	digest.logs.remove(digest.logs.len() - 2);
+
+	env.import_block(import_params);
+}
+
+#[test]
+fn importing_block_one_sets_genesis_epoch() {
+	let mut env = TestContext::new();
+
+	let block_hash = env.propose_and_import_block(BlockId::Number(0), Some(999.into()));
+
+	let epoch_for_second_block = env.epoch_data(&block_hash, 1, 1000.into());
+	let genesis_epoch = Epoch::genesis(&env.link.genesis_config, 999.into());
+	assert_eq!(epoch_for_second_block, genesis_epoch);
+}
+
+#[test]
+fn allows_to_skip_epochs() {
+	// Test scenario.
+	// Epoch length: 6 slots
+	//
+	// Block# : [ 1 2 3 4 5 6 ][ 7 - - - - - ][ - - - - - - ][ 8 ... ]
+	// Slot# : [ 1 2 3 4 5 6 ][ 7 8 9 10 11 12 ][ 13 14 15 16 17 18 ][ 19 ... ]
+	// Epoch# : [ 0 ][ 1 ][ skipped ][ 3 ]
+	//
+	// As a recovery strategy, a fallback epoch 3 is created by reusing part of the
+	// configuration created for epoch 2.
+	let mut env = TestContext::new();
+
+	let blocks = env.propose_and_import_blocks(BlockId::Number(0), 7);
+
+	// First block after the skipped epoch (block #8 @ slot #19)
+	let block =
+		env.propose_and_import_block(BlockId::Hash(*blocks.last().unwrap()), Some(19.into()));
+
+	let epoch_changes = env.link.epoch_changes.shared_data();
+	let epochs: Vec<_> = epoch_changes.tree().iter().collect();
+	assert_eq!(epochs.len(), 3);
+	assert_eq!(*epochs[0].0, blocks[0]);
+	assert_eq!(*epochs[0].1, 1);
+	assert_eq!(*epochs[1].0, blocks[6]);
+	assert_eq!(*epochs[1].1, 7);
+	assert_eq!(*epochs[2].0, block);
+	assert_eq!(*epochs[2].1, 8);
+
+	// First block in E0 (B1) announces E0 (this is special)
+	let data = epoch_changes
+		.epoch(&EpochIdentifier {
+			position: EpochIdentifierPosition::Genesis0,
+			hash: blocks[0],
+			number: 1,
+		})
+		.unwrap();
+	assert_eq!(data.epoch_idx, 0);
+	assert_eq!(data.start_slot, Slot::from(1));
+
+	// First block in E0 (B1) also announces E1
+	let data = epoch_changes
.epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Genesis1, + hash: blocks[0], + number: 1, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 1); + assert_eq!(data.start_slot, Slot::from(7)); + + // First block in E1 (B7) announces E2 + // NOTE: config is used by E3 without altering epoch node values. + // This will break as soon as our assumptions about how fork-tree traversal works + // are not met anymore (this is a good thing) + let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: blocks[6], + number: 7, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 2); + assert_eq!(data.start_slot, Slot::from(13)); + + // First block in E3 (B8) announced E4. + let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: block, + number: 8, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 4); + assert_eq!(data.start_slot, Slot::from(25)); +} + +#[test] +fn finalization_prunes_epoch_changes_and_removes_weights() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(BlockId::Number(0), 21); + + let _fork1 = env.propose_and_import_blocks(BlockId::Hash(canon[0]), 10); + let _fork2 = env.propose_and_import_blocks(BlockId::Hash(canon[7]), 10); + let _fork3 = env.propose_and_import_blocks(BlockId::Hash(canon[11]), 8); + + let epoch_changes = env.link.epoch_changes.clone(); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8); + // And only one root + assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1); + + // Pre-finalize scenario. + // + // X(#y): a block (number y) announcing the next epoch data. + // Information for epoch starting at block #19 is produced on three different forks + // at block #13. 
+ // + // Finalize block #14 + // + // *---------------- F(#13) --#18 < fork #2 + // / + // A(#1) ---- B(#7) ----#8----------#12---- C(#13) ---- D(#19) ------#21 < canon + // \ \ + // \ *---- G(#13) ---- H(#19) ---#20 < fork #3 + // \ + // *-----E(#7)---#11 < fork #1 + + // Finalize block #10 so that on next epoch change the tree is pruned + env.client.finalize_block(BlockId::Hash(canon[13]), None, true).unwrap(); + let canon_cont = env.propose_and_import_blocks(BlockId::Hash(*canon.last().unwrap()), 4); + + // Post-finalize scenario. + // + // B(#7)------ C(#13) ---- D(#19) ------Z(#25) + + let epoch_changes = epoch_changes.shared_data(); + let epoch_changes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| *h).collect(); + + // TODO-SASS-P2: this is fixed by a pending PR on substrate + //assert_eq!(epoch_changes, vec![canon[6], canon[12], canon[18], canon_cont[3]]); + + // TODO-SASS-P2 + //todo!("Requires aux_storage_cleanup"); +} + +#[test] +fn revert_prunes_epoch_changes_and_removes_weights() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(BlockId::Number(0), 21); + let fork1 = env.propose_and_import_blocks(BlockId::Hash(canon[0]), 10); + let fork2 = env.propose_and_import_blocks(BlockId::Hash(canon[7]), 10); + let fork3 = env.propose_and_import_blocks(BlockId::Hash(canon[11]), 8); + + let epoch_changes = env.link.epoch_changes.clone(); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8); + // And only one root + assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1); + + // Pre-revert scenario. + // + // X(#y): a block (number y) announcing the next epoch data. + // Information for epoch starting at block #19 is produced on three different forks + // at block #13. + // One branch starts before the revert point (epoch data should be maintained). + // One branch starts after the revert point (epoch data should be removed). 
+ // + // *----------------- F(#13) --#18 < fork #2 + // / + // A(#1) ---- B(#7) ----#8----+-----#12----- C(#13) ---- D(#19) ------#21 < canon + // \ ^ \ + // \ revert *---- G(#13) ---- H(#19) ---#20 < fork #3 + // \ to #10 + // *-----E(#7)---#11 < fork #1 + + // Revert canon chain to block #10 (best(21) - 11) + crate::revert(env.backend.clone(), 11).unwrap(); + + // Post-revert expected scenario. + // + // + // *----------------- F(#13) --#18 + // / + // A(#1) ---- B(#7) ----#8----#10 + // \ + // *------ E(#7)---#11 + + // Load and check epoch changes. + + let actual_nodes = aux_schema::load_epoch_changes::(&*env.client) + .unwrap() + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| *h) + .collect::>(); + + let expected_nodes = vec![ + canon[0], // A + canon[6], // B + fork2[4], // F + fork1[5], // E + ]; + + assert_eq!(actual_nodes, expected_nodes); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon[..10], true)); + assert!(weight_data_check(&canon[10..], false)); + assert!(weight_data_check(&fork1, true)); + assert!(weight_data_check(&fork2, true)); + assert!(weight_data_check(&fork3, false)); +} + +#[test] +fn revert_not_allowed_for_finalized() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(BlockId::Number(0), 3); + + // Finalize best block + env.client.finalize_block(BlockId::Hash(canon[2]), None, false).unwrap(); + + // Revert canon chain to last finalized block + crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon, true)); +} + +#[test] +fn verify_block_claimed_via_secondary_method() { + let 
mut env = TestContext::new(); + + let blocks = env.propose_and_import_blocks(BlockId::Number(0), 7); + + let in_params = env.propose_block(BlockId::Hash(blocks[6]), Some(9.into())); + + let _out_params = env.verify_block(in_params); +} + +//================================================================================================= +// More complex tests involving communication between multiple nodes. +// +// These tests are performed via a specially crafted test network. +//================================================================================================= + +impl Environment for TestContext { + type CreateProposer = future::Ready>; + type Proposer = TestProposer; + type Error = TestError; + + fn init(&mut self, parent_header: &TestHeader) -> Self::CreateProposer { + let parent_slot = crate::find_pre_digest::(parent_header) + .expect("parent header has a pre-digest") + .slot; + + future::ready(Ok(TestProposer { + client: self.client.clone(), + link: self.link.clone(), + parent_hash: parent_header.hash(), + parent_number: *parent_header.number(), + parent_slot, + })) + } +} + +struct PeerData { + link: SassafrasLink, + block_import: SassafrasBlockImport, +} + +type SassafrasPeer = Peer, SassafrasBlockImport>; + +#[derive(Default)] +struct SassafrasTestNet { + peers: Vec, +} + +impl TestNetFactory for SassafrasTestNet { + type BlockImport = SassafrasBlockImport; + type Verifier = SassafrasVerifier; + type PeerData = Option; + + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option, + ) { + let client = client.as_client(); + + let config = crate::configuration(&*client).expect("config available"); + let (block_import, link) = create_test_block_import(client.clone(), config); + + (BlockImportAdapter::new(block_import.clone()), None, Some(PeerData { link, block_import })) + } + + fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { + let client = client.as_client(); + 
+ let data = maybe_link.as_ref().expect("data provided to verifier instantiation"); + + let config = crate::configuration(&*client).expect("config available"); + create_test_verifier(client.clone(), &data.link, config) + } + + fn peer(&mut self, i: usize) -> &mut SassafrasPeer { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec { + &self.peers + } + + fn mut_peers)>(&mut self, closure: F) { + closure(&mut self.peers); + } +} + +// Multiple nodes authoring and validating blocks +#[test] +fn sassafras_network_progress() { + let net = SassafrasTestNet::new(3); + let net = Arc::new(Mutex::new(net)); + + let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; + + let mut import_notifications = Vec::new(); + let mut sassafras_workers = Vec::new(); + + for (peer_id, auth_id) in peers.iter().enumerate() { + let mut net = net.lock(); + let peer = net.peer(peer_id); + let client = peer.client().as_client(); + let backend = peer.client().as_backend(); + let select_chain = peer.select_chain().expect("Full client has select_chain"); + + let keystore = create_keystore(*auth_id); + + let data = peer.data.as_ref().expect("sassafras link set up during initialization"); + + let env = TestContext::new_with_pre_built_data( + client.clone(), + backend.clone(), + data.link.clone(), + data.block_import.clone(), + ); + + // Run the imported block number is less than five and we don't receive a block produced + // by us and one produced by another peer. 
+ let mut got_own = false; + let mut got_other = false; + let import_futures = client + .import_notification_stream() + .take_while(move |n| { + future::ready( + n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + !(got_own && got_other) + }, + ) + }) + .for_each(|_| future::ready(())); + import_notifications.push(import_futures); + + let slot_duration = data.link.genesis_config.slot_duration(); + let create_inherent_data_providers = Box::new(move |_, _| async move { + let slot = InherentDataProvider::from_timestamp_and_slot_duration( + Timestamp::current(), + slot_duration, + ); + Ok((slot,)) + }); + let sassafras_params = SassafrasWorkerParams { + client: client.clone(), + keystore, + select_chain, + env, + block_import: data.block_import.clone(), + sassafras_link: data.link.clone(), + sync_oracle: DummyOracle, + justification_sync_link: (), + force_authoring: false, + create_inherent_data_providers, + }; + let sassafras_worker = start_sassafras(sassafras_params).unwrap(); + sassafras_workers.push(sassafras_worker); + } + + block_on(future::select( + futures::future::poll_fn(move |cx| { + let mut net = net.lock(); + net.poll(cx); + net.peers().iter().for_each(|peer| { + peer.failed_verifications().iter().next().map(|(h, e)| { + panic!("Verification failed for {:?}: {}", h, e); + }); + }); + Poll::<()>::Pending + }), + future::select(future::join_all(import_notifications), future::join_all(sassafras_workers)), + )); +} diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 1020df4cd6688..a7f6707565d0b 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -90,7 +90,7 @@ pub fn check_header( (Some(ticket), Some(ticket_aux)) => { log::debug!(target: "sassafras", "🌳 checking primary"); let transcript = - make_ticket_transcript(&config.randomness, ticket_aux.attempt, 
epoch.epoch_index); + make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_idx); schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_aux.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; @@ -115,7 +115,7 @@ pub fn check_header( // Check slot-vrf proof - let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_index); + let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_idx); schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; @@ -208,16 +208,22 @@ where } // Check if authorship of this header is an equivocation and return a proof if so. - let equivocation_proof = - match check_equivocation(&*self.client, slot_now, slot, header, author) - .map_err(Error::Client)? - { - Some(proof) => proof, - None => return Ok(()), - }; + let equivocation_proof = match sc_consensus_slots::check_equivocation( + &*self.client, + slot_now, + slot, + header, + author, + ) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + target: "sassafras", + "🌳 Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", author, slot, equivocation_proof.first_header.hash(), @@ -225,14 +231,50 @@ where ); // Get the best block on which we will build and send the equivocation report. - let _best_id: BlockId = self + let best_id = self .select_chain .best_chain() .await .map(|h| BlockId::Hash(h.hash())) .map_err(|e| Error::Client(e.into()))?; - // TODO-SASS-P2 + // Generate a key ownership proof. 
We start by trying to generate the key ownership proof + // at the parent of the equivocating header, this will make sure that proof generation is + // successful since it happens during the on-going session (i.e. session keys are available + // in the state to be able to generate the proof). This might fail if the equivocation + // happens on the first block of the session, in which case its parent would be on the + // previous session. If generation on the parent header fails we try with best block as + // well. + let generate_key_owner_proof = |block_id: &BlockId| { + self.client + .runtime_api() + .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) + .map_err(Error::RuntimeApi) + }; + + let parent_id = BlockId::Hash(*header.parent_hash()); + let key_owner_proof = match generate_key_owner_proof(&parent_id)? { + Some(proof) => proof, + None => match generate_key_owner_proof(&best_id)? { + Some(proof) => proof, + None => { + debug!(target: "babe", "Equivocation offender is not part of the authority set."); + return Ok(()) + }, + }, + }; + + // submit equivocation report at best block. 
+ self.client + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + &best_id, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + + info!(target: "sassafras", "🌳 Submitted equivocation report for author {:?}", author); Ok(()) } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index d77e34344f835..cb1e48a796952 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -54,9 +54,10 @@ use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, Randomness, SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, Ticket, - SASSAFRAS_ENGINE_ID, + AuthorityId, EquivocationProof, Randomness, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, Slot, Ticket, SASSAFRAS_ENGINE_ID, }; +use sp_io::hashing; use sp_runtime::{ generic::DigestItem, traits::{One, Saturating}, @@ -71,17 +72,21 @@ mod mock; #[cfg(all(feature = "std", test))] mod tests; +// To manage epoch changes via session pallet instead of the built-in method +// method (`SameAuthoritiesForever`). pub mod session; +// Re-export pallet symbols. pub use pallet::*; /// Tickets related metadata that is commonly used together. #[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] pub struct TicketsMetadata { - /// Number of tickets available for even and odd sessions, respectivelly. - /// I.e. the index is computed as session-index modulo 2. + /// Number of tickets available into the tickets buffers. + /// The array index is computed as epoch index modulo 2. 
pub tickets_count: [u32; 2], - /// Number of tickets segments + /// Number of outstanding tickets segments requiring to be sorted and stored + /// in one of the epochs tickets buffer pub segments_count: u32, } @@ -144,7 +149,7 @@ pub mod pallet { ValueQuery, >; - /// Next session authorities. + /// Next epoch authorities. #[pallet::storage] pub type NextAuthorities = StorageValue< _, @@ -152,7 +157,7 @@ pub mod pallet { ValueQuery, >; - /// The slot at which the first session started. + /// The slot at which the first epoch started. /// This is `None` until the first block is imported on chain. #[pallet::storage] #[pallet::getter(fn genesis_slot)] @@ -163,12 +168,12 @@ pub mod pallet { #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// Current session randomness. + /// Current epoch randomness. #[pallet::storage] #[pallet::getter(fn randomness)] pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; - /// Next session randomness. + /// Next epoch randomness. #[pallet::storage] pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; @@ -193,9 +198,9 @@ pub mod pallet { /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next /// epoch is enacted. - /// In other words, a config change submitted during session N will be enacted on session N+2. + /// In other words, a config change submitted during epoch N will be enacted on epoch N+2. /// This is to maintain coherence for already submitted tickets for epoch N+1 that where - /// computed using configuration parameters stored for session N+1. + /// computed using configuration parameters stored for epoch N+1. #[pallet::storage] pub(super) type PendingEpochConfigChange = StorageValue<_, SassafrasEpochConfiguration>; @@ -203,14 +208,14 @@ pub mod pallet { #[pallet::storage] pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; - /// Tickets to be used for current and next session. 
- /// The key consists of a - /// - `u8` equal to session-index mod 2 + /// Tickets to be used for current and next epoch. + /// The key is a tuple composed by: + /// - `u8` equal to epoch-index mod 2 /// - `u32` equal to the slot-index. #[pallet::storage] pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>; - /// Next session tickets temporary accumulator. + /// Next epoch tickets temporary accumulator. /// Special `u32::MAX` key is reserved for partially sorted segment. #[pallet::storage] pub type NextTicketsSegments = @@ -270,9 +275,7 @@ pub mod pallet { Initialized::::put(pre_digest); - // TODO-SASS-P3: incremental partial ordering for Next epoch tickets. - - // Enact session change, if necessary. + // Enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now); Weight::zero() @@ -287,6 +290,27 @@ pub mod pallet { let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); + + // If we are in the second half of the epoch, we can start sorting the next epoch + // tickets. + let epoch_duration = T::EpochDuration::get(); + let current_slot_idx = Self::slot_index(pre_digest.slot); + if current_slot_idx >= epoch_duration / 2 { + let mut metadata = TicketsMeta::::get(); + if metadata.segments_count != 0 { + let epoch_idx = EpochIndex::::get() + 1; + let epoch_key = (epoch_idx & 1) as u8; + if metadata.segments_count != 0 { + let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); + Self::sort_tickets( + u32::max(1, metadata.segments_count / slots_left as u32), + epoch_key, + &mut metadata, + ); + TicketsMeta::::set(metadata); + } + } + } } } @@ -294,7 +318,7 @@ pub mod pallet { impl Pallet { /// Submit next epoch tickets. /// - /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remov ethe weight? + /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? 
#[pallet::weight(10_000)] pub fn submit_tickets( origin: OriginFor, @@ -315,13 +339,13 @@ pub mod pallet { /// Plan an epoch config change. /// - /// The epoch config change is recorded and will be enacted on the next call to - /// `enact_session_change`. - /// - /// The config will be activated one epoch after. Multiple calls to this method will - /// replace any existing planned config change that had not been enacted yet. + /// The epoch config change is recorded and will be announced at the begin of the + /// next epoch together with next epoch authorities information. + /// In other words the configuration will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. /// - /// TODO: TODO-SASS-P4: proper weight + /// TODO-SASS-P4: proper weight #[pallet::weight(10_000)] pub fn plan_config_change( origin: OriginFor, @@ -336,6 +360,32 @@ pub mod pallet { PendingEpochConfigChange::::put(config); Ok(()) } + + /// Report authority equivocation. + /// + /// This method will verify the equivocation proof and validate the given key ownership + /// proof against the extracted offender. If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only block authors will + /// call it (validated in `ValidateUnsigned`), as such if the block author is defined it + /// will be defined as the equivocation reporter. 
+ /// + /// TODO-SASS-P4: proper weight + #[pallet::weight(10_000)] + pub fn report_equivocation_unsigned( + origin: OriginFor, + _equivocation_proof: EquivocationProof, + //key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResult { + ensure_none(origin)?; + + // Self::do_report_equivocation( + // T::HandleEquivocation::block_author(), + // *equivocation_proof, + // key_owner_proof, + // ) + Ok(()) + } } #[pallet::validate_unsigned] @@ -373,7 +423,8 @@ pub mod pallet { // Current slot should be less than half of epoch duration. let epoch_duration = T::EpochDuration::get(); - if Self::current_slot_index() >= epoch_duration / 2 { + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx >= epoch_duration / 2 { log::warn!( target: "sassafras::runtime", "🌳 Timeout to propose tickets, bailing out.", @@ -382,7 +433,6 @@ pub mod pallet { } // Check tickets are below threshold - let next_auth = NextAuthorities::::get(); let epoch_config = EpochConfig::::get(); let threshold = sp_consensus_sassafras::compute_threshold( @@ -391,11 +441,6 @@ pub mod pallet { epoch_config.attempts_number, next_auth.len() as u32, ); - - // TODO-SASS-P2: if we move this in the `submit_tickets` call then we can - // can drop only the invalid tickets. - // In this way we don't penalize validators that submit tickets together - // with faulty validators. if !tickets .iter() .all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold)) @@ -403,15 +448,17 @@ pub mod pallet { return InvalidTransaction::Custom(0).into() } + // This should be set such that it is discarded after the first epoch half + // TODO-SASS-P3: double check this. Should we then check again in the extrinsic + // itself? Is this run also just before the extrinsic execution or only on tx queue + // insertion? 
+ let tickets_longevity = epoch_duration / 2 - current_slot_idx; + let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + ValidTransaction::with_tag_prefix("Sassafras") - // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) - // TODO-SASS-P2: if possible use a more efficient way to distinquish - // duplicates... - .and_provides(tickets) - // TODO-SASS-P2: this should be set such that it is discarded after the - // first half - .longevity(3_u64) + .longevity(tickets_longevity) + .and_provides(tickets_tag) .propagate(true) .build() } else { @@ -423,17 +470,9 @@ pub mod pallet { // Inherent methods impl Pallet { - // // TODO-SASS-P2: I don't think this is really required - // /// Determine the Sassafras slot duration based on the Timestamp module configuration. - // pub fn slot_duration() -> T::Moment { - // // We double the minimum block-period so each author can always propose within - // // the majority of their slot. - // ::MinimumPeriod::get().saturating_mul(2u32.into()) - // } - /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. - pub fn should_end_session(now: T::BlockNumber) -> bool { + pub fn should_end_epoch(now: T::BlockNumber) -> bool { // The epoch has technically ended during the passage of time between this block and the // last, but we have to "end" the epoch now, since there is no earlier possible block we // could have done it. @@ -457,17 +496,15 @@ impl Pallet { slot.checked_sub(Self::current_epoch_start().into()).unwrap_or(u64::MAX) } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_session` + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_epoch` /// has returned `true`, and the caller is the only caller of this function. 
/// - /// Typically, this is not handled directly by the user, but by higher-level validator-set - /// manager logic like `pallet-session`. + /// Typically, this is not handled directly, but by a higher-level validator-set + /// manager like `pallet-session`. /// - /// TODO-SASS-P3: /// If we detect one or more skipped epochs the policy is to use the authorities and values - /// from the first skipped epoch. - /// Should the tickets be invalidated? Currently they are... see the `get-ticket` method. - pub(crate) fn enact_session_change( + /// from the first skipped epoch. The tickets are invalidated. + pub(crate) fn enact_epoch_change( authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, next_authorities: WeakBoundedVec< (AuthorityId, SassafrasAuthorityWeight), @@ -489,11 +526,9 @@ impl Pallet { let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); if slot_idx >= T::EpochDuration::get() { - // Detected one or more skipped epochs, kill tickets and recompute the `epoch_index`. + // Detected one or more skipped epochs, kill tickets and recompute epoch index. 
TicketsMeta::::kill(); - // TODO-SASS-P2: adjust epoch index (TEST ME) - let idx: u64 = slot_idx.into(); - epoch_idx += idx / T::EpochDuration::get(); + epoch_idx += u64::from(slot_idx) / T::EpochDuration::get(); } EpochIndex::::put(epoch_idx); @@ -546,7 +581,7 @@ impl Pallet { s.extend_from_slice(&next_epoch_index.to_le_bytes()); s.extend_from_slice(&accumulator); - let next_randomness = sp_io::hashing::blake2_256(&s); + let next_randomness = hashing::blake2_256(&s); NextRandomness::::put(&next_randomness); next_randomness @@ -575,7 +610,7 @@ impl Pallet { fn deposit_randomness(randomness: &Randomness) { let mut s = RandomnessAccumulator::::get().to_vec(); s.extend_from_slice(randomness); - let accumulator = sp_io::hashing::blake2_256(&s); + let accumulator = hashing::blake2_256(&s); RandomnessAccumulator::::put(accumulator); } @@ -677,7 +712,7 @@ impl Pallet { // Lexicographically sort the tickets who belongs to the next epoch. // The tickets are fetched from at most `max_iter` segments received via the `submit_tickets` // extrinsic. The resulting sorted vector is truncated and if all the segments where sorted - // it is saved to be as the next session tickets. + // it is saved to be as the next epoch tickets. // Else the result is saved to be used by next calls. fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { let mut segments_count = metadata.segments_count; @@ -747,6 +782,27 @@ impl Pallet { }, } } + + /// Submits an equivocation via an unsigned extrinsic. + /// + /// Unsigned extrinsic is created with a call to `report_equivocation_unsigned`. 
+ pub fn submit_unsigned_equivocation_report( + equivocation_proof: EquivocationProof, + //key_owner_proof: T::KeyOwnerProof, + ) -> bool { + let call = Call::report_equivocation_unsigned { + equivocation_proof, + // key_owner_proof, + }; + + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(()) => true, + Err(e) => { + log::error!(target: "runtime::sassafras", "Error submitting equivocation report: {:?}", e); + false + }, + } + } } /// Trigger an epoch change, if any should take place. @@ -770,11 +826,11 @@ pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { fn trigger(now: T::BlockNumber) { - if >::should_end_session(now) { + if >::should_end_epoch(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); - >::enact_session_change(authorities, next_authorities); + >::enact_epoch_change(authorities, next_authorities); } } } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index a8c9ca6e856d7..c7bd93c0b2175 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -107,10 +107,13 @@ frame_support::construct_runtime!( } ); +/// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len).1 } +/// Build and returns test storage externalities and authority set pairs used +/// by Sassafras genesis configuration. 
pub fn new_test_ext_with_pairs( authorities_len: usize, ) -> (Vec, sp_io::TestExternalities) { @@ -120,13 +123,16 @@ pub fn new_test_ext_with_pairs( let authorities = pairs.iter().map(|p| (p.public(), 1)).collect(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); let config = pallet_sassafras::GenesisConfig { authorities, epoch_config: Default::default() }; - >::assimilate_storage(&config, &mut t) - .unwrap(); + >::assimilate_storage( + &config, + &mut storage, + ) + .unwrap(); - (pairs, t.into()) + (pairs, storage.into()) } fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { @@ -150,6 +156,8 @@ fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput (output, proof) } +/// Construct at most `attempts` tickets for the given `slot`. +/// TODO-SASS-P3: filter out invalid tickets according to test threshold. pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { (0..attempts) .into_iter() @@ -163,7 +171,7 @@ fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { let mut epoch = Sassafras::epoch_index(); let mut randomness = Sassafras::randomness(); - // Check if epoch is going to change on initialization + // Check if epoch is going to change on initialization. let epoch_start = Sassafras::current_epoch_start(); if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); @@ -178,6 +186,7 @@ fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { (output, proof) } +/// Produce a `PreDigest` instance for the given parameters. 
pub fn make_pre_digest( authority_idx: AuthorityIndex, slot: Slot, @@ -187,6 +196,8 @@ pub fn make_pre_digest( PreDigest { authority_idx, slot, vrf_output, vrf_proof, ticket_aux: None } } +/// Produce a `PreDigest` instance for the given parameters and wrap the result into a `Digest` +/// instance. pub fn make_wrapped_pre_digest( authority_idx: AuthorityIndex, slot: Slot, @@ -198,6 +209,7 @@ pub fn make_wrapped_pre_digest( Digest { logs: vec![log] } } +/// Progress the pallet state up to the given block `number` and `slot`. pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { Sassafras::on_finalize(System::block_number()); let parent_hash = System::finalize().hash(); @@ -211,7 +223,8 @@ pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { digest } -/// Slots will grow accordingly to blocks +/// Progress the pallet state up to the given block `number`. +/// Slots will grow linearly accordingly to blocks. pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { let mut slot = Sassafras::current_slot() + 1; let mut digest = None; diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs index bfe4e1c79b968..e15fd3637b9ae 100644 --- a/frame/sassafras/src/session.rs +++ b/frame/sassafras/src/session.rs @@ -29,7 +29,7 @@ impl ShouldEndSession for Pallet { // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we // have initialized the pallet and updated the current slot. 
Self::on_initialize(now); - Self::should_end_session(now) + Self::should_end_epoch(now) } } @@ -66,7 +66,7 @@ impl OneSessionHandler for Pallet { ), ); - Self::enact_session_change(bounded_authorities, next_bounded_authorities) + Self::enact_epoch_change(bounded_authorities, next_bounded_authorities) } fn on_disabled(i: u32) { diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index bd253c0c72f40..9c5828ac50b62 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -25,6 +25,14 @@ use hex_literal::hex; use sp_consensus_sassafras::Slot; use sp_runtime::traits::Get; +#[test] +fn genesis_values_sanity_check() { + new_test_ext(4).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 4); + assert_eq!(EpochConfig::::get(), Default::default()); + }); +} + #[test] fn slot_ticket_fetch() { let genesis_slot = Slot::from(100); @@ -49,21 +57,22 @@ fn slot_ticket_fetch() { Tickets::::insert((1, i as u32), ticket); }); TicketsMeta::::set(TicketsMetadata { - tickets_count: [max_tickets, max_tickets - 1], + tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], segments_count: 0, }); - // Before initializing `GenesisSlot` value (should return first element of current session) - // This is due to special case hardcoded value. + // Before initializing `GenesisSlot` value the pallet always return the first slot + // This is a kind of special case hardcoded policy that should never happen in practice + // (i.e. the first thing the pallet does is to initialize the genesis slot). assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), Some(curr_tickets[1])); assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), Some(curr_tickets[1])); assert_eq!(Sassafras::slot_ticket(genesis_slot + 100), Some(curr_tickets[1])); - // Initialize genesis slot value. + // Initialize genesis slot.. GenesisSlot::::set(genesis_slot); - // Before Current session. 
+ // Try fetch a ticket for a slot before current session. assert_eq!(Sassafras::slot_ticket(0.into()), None); // Current session tickets. @@ -90,20 +99,12 @@ fn slot_ticket_fetch() { assert_eq!(Sassafras::slot_ticket(genesis_slot + 18), Some(next_tickets[2])); assert_eq!(Sassafras::slot_ticket(genesis_slot + 19), Some(next_tickets[0])); - // Beyend next session. + // Try fetch tickets for slots beyend next session. assert_eq!(Sassafras::slot_ticket(genesis_slot + 20), None); assert_eq!(Sassafras::slot_ticket(genesis_slot + 42), None); }); } -#[test] -fn genesis_values() { - new_test_ext(4).execute_with(|| { - assert_eq!(Sassafras::authorities().len(), 4); - assert_eq!(EpochConfig::::get(), Default::default()); - }); -} - #[test] fn on_first_block_after_genesis() { let (pairs, mut ext) = new_test_ext_with_pairs(4); @@ -222,7 +223,7 @@ fn on_normal_block() { } #[test] -fn epoch_change_block() { +fn produce_epoch_change_digest() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { @@ -293,6 +294,133 @@ fn epoch_change_block() { }) } +#[test] +fn produce_epoch_change_digest_with_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + let config = SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; + Sassafras::plan_config_change(RuntimeOrigin::root(), config.clone()).unwrap(); + + // We want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); + + Sassafras::on_finalize(start_block + epoch_duration); + + // Header data check. + // Skip pallet status checks that were already performed by other tests. 
+ + let header = System::finalize(); + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: Some(config), // We are mostly interested in this + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn segments_incremental_sortition_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(1); + let pair = &pairs[0]; + let segments_num = 14; + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + + let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); + System::initialize(&start_block, &Default::default(), &digest); + Sassafras::on_initialize(start_block); + + // Submit authoring tickets in three different batches. + // We can ignore the threshold since we are not passing through the unsigned extrinsic + // validation. 
+ let mut tickets: Vec = + make_tickets(start_slot + 1, segments_num * max_tickets, pair) + .into_iter() + .map(|(output, _)| output) + .collect(); + let segment_len = tickets.len() / segments_num as usize; + for i in 0..segments_num as usize { + let segment = + tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); + } + + tickets.sort(); + tickets.truncate(max_tickets as usize); + let _expected_tickets = tickets; + + let epoch_duration: u64 = ::EpochDuration::get(); + + // Proceed to half of the epoch (sortition should not have been started yet) + let half_epoch_block = start_block + epoch_duration / 2; + progress_to_block(half_epoch_block, pair); + + // Check that next epoch tickets sortition is not started yet + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, segments_num); + assert_eq!(meta.tickets_count, [0, 0]); + + // Monitor incremental sortition + + progress_to_block(half_epoch_block + 1, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 12); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 2, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 9); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 3, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 6); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 4, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 3); + assert_eq!(meta.tickets_count, [0, 0]); + + Sassafras::on_finalize(half_epoch_block + 4); + let header = System::finalize(); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); + assert_eq!(header.digest.logs.len(), 1); + + // The next block will be the first produced on the new epoch, + // At this point the tickets were found to 
be sorted and ready to be used. + let slot = Sassafras::current_slot() + 1; + let digest = make_wrapped_pre_digest(0, slot, pair); + let number = System::block_number() + 1; + System::initialize(&number, &header.hash(), &digest); + Sassafras::on_initialize(number); + Sassafras::on_finalize(half_epoch_block + 5); + let header = System::finalize(); + assert_eq!(header.digest.logs.len(), 2); + }); +} + #[test] fn submit_enact_claim_tickets() { let (pairs, mut ext) = new_test_ext_with_pairs(4); @@ -341,9 +469,10 @@ fn submit_enact_claim_tickets() { // Process up to the last epoch slot (do not enact epoch change) let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); - // TODO-SASS-P2: at this point next tickets should have been sorted - //assert_eq!(NextTicketsSegmentsCount::::get(), 0); - //assert!(Tickets::::iter().next().is_some()); + // At this point next tickets should have been sorted + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); // Check if we can claim next epoch tickets in outside-in fashion. let slot = Sassafras::current_slot(); @@ -379,7 +508,7 @@ fn submit_enact_claim_tickets() { } #[test] -fn block_skips_epochs() { +fn block_allowed_to_skip_epochs() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 621ab859b914f..98a14819ba326 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -329,6 +329,7 @@ where /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. #[derive(Decode, Encode, PartialEq)] pub struct OpaqueKeyOwnershipProof(Vec); + impl OpaqueKeyOwnershipProof { /// Create a new `OpaqueKeyOwnershipProof` using the given encoded /// representation. 
diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 56903eb7da7c3..c4bd2daca8f97 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -78,12 +78,15 @@ pub type SassafrasAuthorityWeight = u64; /// Primary blocks have a weight of 1 whereas secondary blocks have a weight of 0. pub type SassafrasBlockWeight = u32; +/// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). +pub type EquivocationProof = sp_consensus_slots::EquivocationProof; + /// Configuration data used by the Sassafras consensus engine. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] pub struct SassafrasConfiguration { /// The slot duration in milliseconds. pub slot_duration: u64, - /// The duration of epochs in slots. + /// The duration of epoch in slots. pub epoch_duration: u64, /// The authorities for the epoch. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, @@ -130,7 +133,6 @@ pub struct TicketAux { /// The parameters should be chosen such that T <= 1. /// If `attempts * validators` is zero then we fallback to T = 0 // TODO-SASS-P3: this formula must be double-checked... -#[inline] pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> U256 { let den = attempts as u64 * validators as u64; let num = redundancy as u64 * slots as u64; @@ -141,11 +143,31 @@ pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: } /// Returns true if the given VRF output is lower than the given threshold, false otherwise. -#[inline] pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { U256::from(ticket.as_bytes()) < threshold } +/// An opaque type used to represent the key ownership proof at the runtime API boundary. +/// The inner value is an encoded representation of the actual key ownership proof which will be +/// parameterized when defining the runtime. 
At the runtime API boundary this type is unknown and + /// as such we keep this opaque representation, implementors of the runtime API will have to make + /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. +#[derive(Decode, Encode, PartialEq)] +pub struct OpaqueKeyOwnershipProof(Vec); + +impl OpaqueKeyOwnershipProof { + /// Create a new `OpaqueKeyOwnershipProof` using the given encoded representation. + pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { + OpaqueKeyOwnershipProof(inner) + } + + /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key + /// ownership proof type. + pub fn decode(self) -> Option { + Decode::decode(&mut &self.0[..]).ok() + } +} + // Runtime API. sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. @@ -159,5 +181,34 @@ sp_api::decl_runtime_apis! { /// Get expected ticket for the given slot. fn slot_ticket(slot: Slot) -> Option; + + /// Generates a proof of key ownership for the given authority in the current epoch. + /// + /// An example usage of this module is coupled with the session historical module to prove + /// that a given authority key is tied to a given staking identity during a specific + /// session. Proofs of key ownership are necessary for submitting equivocation reports. + /// + /// NOTE: even though the API takes a `slot` as parameter the current implementation + /// ignores this parameter and instead relies on this method being called at the correct + /// block height, i.e. any point at which the epoch for the given slot is live on-chain. + /// Future implementations will instead use indexed data through an offchain worker, not + /// requiring older states to be available. + fn generate_key_ownership_proof( + slot: Slot, + authority_id: AuthorityId, + ) -> Option; + + /// Submits an unsigned extrinsic to report an equivocation. 
+ /// + /// The caller must provide the equivocation proof and a key ownership proof (should be + /// obtained using `generate_key_ownership_proof`). The extrinsic will be unsigned and + /// should only be accepted for local authorship (not to be broadcast to the network). This + /// method returns `None` when creation of the extrinsic fails, e.g. if equivocation + /// reporting is disabled for the given runtime (i.e. this method is hardcoded to return + /// `None`). Only useful in an offchain context. + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: EquivocationProof, + key_owner_proof: OpaqueKeyOwnershipProof, + ) -> bool; } } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 744cc527e6012..92c8fd4ad6856 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,6 +18,7 @@ beefy-merkle-tree = { version = "4.0.0-dev", default-features = false, path = ". sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -35,6 +36,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = 
"../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } +pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../frame/sassafras" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } @@ -73,6 +75,7 @@ std = [ "sp-application-crypto/std", "sp-consensus-aura/std", "sp-consensus-babe/std", + "sp-consensus-sassafras/std", "sp-block-builder/std", "codec/std", "scale-info/std", @@ -95,6 +98,7 @@ std = [ "sp-externalities/std", "sp-state-machine/std", "pallet-babe/std", + "pallet-sassafras/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index a64e3f25ef041..975285c261d33 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -63,10 +63,10 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -// Ensure Babe and Aura use the same crypto to simplify things a bit. +// Ensure Babe, Sassafras and Aura use the same crypto to simplify things a bit. 
pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; - pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; +pub type SassafrasId = sp_consensus_sassafras::AuthorityId; // Include the WASM binary #[cfg(feature = "std")] @@ -163,6 +163,22 @@ pub enum Extrinsic { OffchainIndexSet(Vec, Vec), OffchainIndexClear(Vec), Store(Vec), + Sassafras, +} + +impl From> for Extrinsic { + fn from(call: pallet_sassafras::Call) -> Self { + use pallet_sassafras::Call; + match call { + Call::submit_tickets { tickets: _ } => Extrinsic::Sassafras, + Call::plan_config_change { config: _ } => Extrinsic::Sassafras, + Call::report_equivocation_unsigned { equivocation_proof: _ } => Extrinsic::Sassafras, + _ => panic!( + "Unexpected Sassafras call type: {:?}, unable to converto to Extrinsic", + call + ), + } + } } parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinsic does not need this @@ -211,6 +227,8 @@ impl BlindCheckable for Extrinsic { Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), + // TODO-SASS-P2 + Extrinsic::Sassafras => Ok(Extrinsic::Sassafras), } } } @@ -524,6 +542,9 @@ impl frame_support::traits::PalletInfo for Runtime { if type_id == sp_std::any::TypeId::of::>() { return Some(2) } + if type_id == sp_std::any::TypeId::of::>() { + return Some(3) + } None } @@ -538,6 +559,9 @@ impl frame_support::traits::PalletInfo for Runtime { if type_id == sp_std::any::TypeId::of::>() { return Some("Babe") } + if type_id == sp_std::any::TypeId::of::>() { + return Some("Sassafras") + } None } @@ -552,6 +576,9 @@ impl frame_support::traits::PalletInfo for Runtime { if type_id == sp_std::any::TypeId::of::>() { return Some("pallet_babe") } + if type_id == sp_std::any::TypeId::of::>() { + return Some("pallet_sassafras") + } None } @@ -567,6 +594,9 @@ impl frame_support::traits::PalletInfo for 
Runtime { if type_id == sp_std::any::TypeId::of::>() { return Some(pallet_babe::Pallet::::crate_version()) } + if type_id == sp_std::any::TypeId::of::>() { + return Some(pallet_sassafras::Pallet::::crate_version()) + } None } @@ -621,6 +651,7 @@ impl pallet_timestamp::Config for Runtime { } parameter_types! { + pub const SlotDuration: u64 = 1000; pub const EpochDuration: u64 = 6; } @@ -649,6 +680,23 @@ impl pallet_babe::Config for Runtime { type MaxAuthorities = ConstU32<10>; } +impl frame_system::offchain::SendTransactionTypes for Runtime +where + Extrinsic: From, +{ + type Extrinsic = Extrinsic; + type OverarchingCall = Extrinsic; +} + +impl pallet_sassafras::Config for Runtime { + type SlotDuration = SlotDuration; + type EpochDuration = EpochDuration; + //type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32<10>; +} + /// Adds one to the given input and returns the final result. #[inline(never)] fn benchmark_add_one(i: u64) -> u64 { @@ -895,6 +943,48 @@ cfg_if! 
{ } } + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + let authorities = system::authorities().into_iter().map(|x| { + let authority: sr25519::Public = x.into(); + (SassafrasId::from(authority), 1) + }).collect(); + sp_consensus_sassafras::SassafrasConfiguration { + slot_duration: SlotDuration::get(), + epoch_duration: EpochDuration::get(), + authorities, + randomness: >::randomness(), + threshold_params: >::config(), + } + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + >::submit_tickets_unsigned_extrinsic(tickets) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { + >::slot_ticket(slot) + } + + fn generate_key_ownership_proof( + _slot: sp_consensus_sassafras::Slot, + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + // TODO-SASS-P2 + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + // TODO-SASS-P2 + false + } + } + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { let ex = Extrinsic::IncludeData(header.number.encode()); @@ -1169,6 +1259,44 @@ cfg_if! 
{ } } + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + sp_consensus_sassafras::SassafrasConfiguration { + slot_duration: SlotDuration::get(), + epoch_duration: EpochDuration::get(), + authorities: >::authorities().to_vec(), + randomness: >::randomness(), + threshold_params: >::config(), + } + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + >::submit_tickets_unsigned_extrinsic(tickets) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { + >::slot_ticket(slot) + } + + fn generate_key_ownership_proof( + slot: sp_consensus_sassafras::Slot, + authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + // TODO-SASS-P2 + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + // TODO-SASS-P2 + false + } + } + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { let ex = Extrinsic::IncludeData(header.number.encode()); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 6e33d5c25fe6f..e832ea66284a6 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -276,6 +276,8 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx Ok(Ok(())) }, Extrinsic::Store(data) => execute_store(data.clone()), + // TODO-SASS-P2 + Extrinsic::Sassafras => Ok(Ok(())), } } From fa72e51919161b739dd9f4cae0256bc4154e87bd Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 29 Oct 2022 17:26:28 +0200 Subject: [PATCH 09/62] Fixes after master merge --- Cargo.lock | 99 ++++++++++++++------ client/consensus/sassafras/src/authorship.rs | 12 +-- client/consensus/sassafras/src/tests.rs | 4 +- 3 files changed, 74 insertions(+), 41 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index a5e309171c56c..71cb890f9f535 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -980,10 +980,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", - "textwrap", + "textwrap 0.11.0", "unicode-width", ] +[[package]] +name = "clap" +version = "3.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +dependencies = [ + "atty", + "bitflags", + "clap_derive 3.2.18", + "clap_lex 0.2.4", + "indexmap", + "once_cell", + "strsim", + "termcolor", + "textwrap 0.16.0", +] + [[package]] name = "clap" version = "4.0.11" @@ -992,8 +1009,8 @@ checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" dependencies = [ "atty", "bitflags", - "clap_derive", - "clap_lex", + "clap_derive 4.0.10", + "clap_lex 0.3.0", "once_cell", "strsim", "termcolor", @@ -1008,6 +1025,19 @@ dependencies = [ "clap 4.0.11", ] +[[package]] +name = "clap_derive" +version = "3.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "clap_derive" version = "4.0.10" @@ -1021,6 +1051,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "clap_lex" version = "0.3.0" @@ -2829,21 +2868,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] -<<<<<<< HEAD name = "hex-literal" version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" [[package]] -name = "hex_fmt" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - -[[package]] -======= ->>>>>>> master name = "hmac" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4679,7 +4709,7 @@ dependencies = [ name = "node-sassafras" version = "0.1.0" dependencies = [ - "clap 3.1.18", + "clap 3.2.23", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -6060,44 +6090,45 @@ dependencies = [ ] [[package]] -<<<<<<< HEAD -name = "pallet-sassafras" -version = "0.1.0" +name = "pallet-root-offences" +version = "1.0.0" dependencies = [ - "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", - "hex-literal", - "log", + "pallet-balances", + "pallet-offences", "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-application-crypto", - "sp-consensus-sassafras", "sp-core", "sp-io", "sp-runtime", -======= -name = "pallet-root-offences" -version = "1.0.0" + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-sassafras" +version = "0.1.0" dependencies = [ - "frame-election-provider-support", + "frame-benchmarking", "frame-support", "frame-system", - "pallet-balances", - "pallet-offences", + "hex-literal", + "log", "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", "sp-core", "sp-io", "sp-runtime", - "sp-staking", ->>>>>>> master "sp-std", ] @@ -10804,6 +10835,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "textwrap" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" + [[package]] name = "thiserror" version = "1.0.30" diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 1f8f7b3be3787..9e620194ac09d 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -176,7 +176,6 @@ where L: sc_consensus::JustificationSyncLink, ER: std::error::Error + Send + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; @@ -184,6 +183,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { "sassafras" @@ -193,11 +193,7 @@ where &mut self.block_import } - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { self.epoch_changes .shared_data() .epoch_descriptor_for_child_of( @@ -210,7 +206,7 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) @@ -277,7 +273,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, + epoch_descriptor: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 5cf0bef6795e8..5d1915473eb9f 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -678,7 +678,7 @@ fn 
finalization_prunes_epoch_changes_and_removes_weights() { // *-----E(#7)---#11 < fork #1 // Finalize block #10 so that on next epoch change the tree is pruned - env.client.finalize_block(BlockId::Hash(canon[13]), None, true).unwrap(); + env.client.finalize_block(&canon[13], None, true).unwrap(); let canon_cont = env.propose_and_import_blocks(BlockId::Hash(*canon.last().unwrap()), 4); // Post-finalize scenario. @@ -777,7 +777,7 @@ fn revert_not_allowed_for_finalized() { let canon = env.propose_and_import_blocks(BlockId::Number(0), 3); // Finalize best block - env.client.finalize_block(BlockId::Hash(canon[2]), None, false).unwrap(); + env.client.finalize_block(&canon[2], None, false).unwrap(); // Revert canon chain to last finalized block crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); From 7d8f5c3bce1df32c4efb92ad5d45be5dffd6ba40 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 29 Oct 2022 17:58:46 +0200 Subject: [PATCH 10/62] Fixes in the sassafras demo after master merge --- Cargo.lock | 53 ++------------------------ bin/node-sassafras/node/Cargo.toml | 2 +- bin/node-sassafras/node/src/service.rs | 3 +- bin/node-sassafras/runtime/src/lib.rs | 7 +++- 4 files changed, 12 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71cb890f9f535..909e307957fc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -980,27 +980,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", - "textwrap 0.11.0", + "textwrap", "unicode-width", ] -[[package]] -name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - "atty", - "bitflags", - "clap_derive 3.2.18", - "clap_lex 0.2.4", - "indexmap", - "once_cell", - "strsim", - "termcolor", - "textwrap 0.16.0", -] - 
[[package]] name = "clap" version = "4.0.11" @@ -1009,8 +992,8 @@ checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" dependencies = [ "atty", "bitflags", - "clap_derive 4.0.10", - "clap_lex 0.3.0", + "clap_derive", + "clap_lex", "once_cell", "strsim", "termcolor", @@ -1025,19 +1008,6 @@ dependencies = [ "clap 4.0.11", ] -[[package]] -name = "clap_derive" -version = "3.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "clap_derive" version = "4.0.10" @@ -1051,15 +1021,6 @@ dependencies = [ "syn", ] -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.3.0" @@ -4709,7 +4670,7 @@ dependencies = [ name = "node-sassafras" version = "0.1.0" dependencies = [ - "clap 3.2.23", + "clap 4.0.11", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -10835,12 +10796,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.30" diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 8587b0462a03f..46e9216e9f1aa 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-sassafras" [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = 
"../../../client/cli", features = ["wasmtime"] } sp-core = { version = "6.0.0", path = "../../../primitives/core" } diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 1f7beb20f3609..3e6a6d96e2eb5 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -203,7 +203,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -249,6 +249,7 @@ pub fn new_full(mut config: Configuration) -> Result rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, + tx_handler_controller, config, telemetry: telemetry.as_mut(), })?; diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index f0ef011b14d39..1ffb7789220c3 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -157,8 +157,11 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. 
- pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2_u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + (2_u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + NORMAL_DISPATCH_RATIO, + ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; From 1be8a8475bef992a4540889ae45afe16e250d057 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 3 Nov 2022 14:08:30 +0100 Subject: [PATCH 11/62] TEMPORARY --- bin/node-sassafras/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 1ffb7789220c3..4db5e3aa3dd2a 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -156,7 +156,7 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; - /// We allow for 2 seconds of compute with a 6 second average block time. + /// We allow for 2 seconds of compute with a 3 second average block time. 
pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( (2_u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), From 0fc4a98dab463df8877085aa54eb54b5960f8231 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 15 Nov 2022 18:19:35 +0100 Subject: [PATCH 12/62] Refactory of block-import method Introduced a separate epoch-import method to improve code readability --- .../consensus/sassafras/src/block_import.rs | 413 ++++++++++-------- frame/sassafras/src/lib.rs | 7 +- 2 files changed, 228 insertions(+), 192 deletions(-) diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 01e804ecf3ea1..81563256ef30e 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -21,7 +21,7 @@ use super::*; use sc_client_api::{AuxDataOperations, FinalityNotification, PreCommitActions}; -/// A block-import handler for Sassafras. +/// Block-import handler for Sassafras. /// /// This scans each imported block for epoch change announcements. The announcements are /// tracked in a tree (of all forks), and the import logic validates all epoch change @@ -87,6 +87,149 @@ where } } +struct RecoverableEpochChanges { + old_epoch_changes: EpochChangesFor, + weak_lock: sc_consensus::shared_data::SharedDataLockedUpgradable>, +} + +impl RecoverableEpochChanges { + fn rollback(mut self) { + *self.weak_lock.upgrade() = self.old_epoch_changes; + } +} + +impl SassafrasBlockImport +where + C: AuxStore + HeaderBackend + HeaderMetadata, +{ + // The fork choice rule is that we pick the heaviest chain (i.e. more blocks built + // using primary mechanism), if there's a tie we go with the longest chain. 
+ fn is_new_best( + &self, + curr_weight: u32, + curr_number: NumberFor, + parent_hash: B::Hash, + ) -> Result { + let info = self.client.info(); + + let new_best = if info.best_hash == parent_hash { + true + } else { + let best_weight = aux_schema::load_block_weight(&*self.client, &info.best_hash) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup("No block weight for best header.".into()) + })?; + curr_weight > best_weight || + (curr_weight == best_weight && curr_number > info.best_number) + }; + + Ok(new_best) + } + + fn import_epoch( + &mut self, + viable_epoch_desc: ViableEpochDescriptor, Epoch>, + next_epoch_desc: NextEpochDescriptor, + slot: Slot, + number: NumberFor, + hash: B::Hash, + parent_hash: B::Hash, + verbose: bool, + auxiliary: &mut Vec<(Vec, Option>)>, + ) -> Result, ConsensusError> { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + let log_level = if verbose { log::Level::Debug } else { log::Level::Info }; + + let mut viable_epoch = epoch_changes + .viable_epoch(&viable_epoch_desc, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })? + .into_cloned(); + + if viable_epoch.as_ref().end_slot() <= slot { + // Some epochs must have been skipped as our current slot fits outside the + // current epoch. We will figure out which is the first skipped epoch and we + // will partially re-use its data for this "recovery" epoch. + let epoch_data = viable_epoch.as_mut(); + let skipped_epochs = + (*slot - *epoch_data.start_slot) / epoch_data.config.epoch_duration; + let original_epoch_idx = epoch_data.epoch_idx; + + // NOTE: notice that we are only updating a local copy of the `Epoch`, this + // makes it so that when we insert the next epoch into `EpochChanges` below + // (after incrementing it), it will use the correct epoch index and start slot. 
+ // We do not update the original epoch that may be reused because there may be + // some other forks where the epoch isn't skipped. + // Not updating the original epoch works because when we search the tree for + // which epoch to use for a given slot, we will search in-depth with the + // predicate `epoch.start_slot <= slot` which will still match correctly without + // requiring to update `start_slot` to the correct value. + epoch_data.epoch_idx += skipped_epochs; + epoch_data.start_slot = Slot::from( + *epoch_data.start_slot + skipped_epochs * epoch_data.config.epoch_duration, + ); + log::warn!( + target: "sassafras", + "🌳 Epoch(s) skipped from {} to {}", + original_epoch_idx, epoch_data.epoch_idx + ); + } + + log!(target: "sassafras", + log_level, + "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_idx, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_desc); + + log!(target: "sassafras", + log_level, + "🌳 🍁 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + let old_epoch_changes = (*epoch_changes).clone(); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import(descendent_query(&*self.client), hash, number, parent_hash, next_epoch) + .map_err(|e| { + ConsensusError::ClientImport(format!("Error importing epoch changes: {}", e)) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + warn!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + *epoch_changes = old_epoch_changes; + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + auxiliary.extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + Ok(RecoverableEpochChanges { old_epoch_changes, weak_lock: epoch_changes.release_mutex() }) + } +} + #[async_trait::async_trait] impl BlockImport for SassafrasBlockImport where @@ -112,6 +255,21 @@ where let hash = block.post_hash(); let number = *block.header.number(); + let viable_epoch_desc = block + .remove_intermediate::>(INTERMEDIATE_KEY)? + .epoch_descriptor; + + // Early exit if block already in chain, otherwise the check for + // epoch changes will error when trying to re-import an epoch change. 
+ match self.client.status(BlockId::Hash(hash)) { + Ok(sp_blockchain::BlockStatus::InChain) => { + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) + }, + Ok(sp_blockchain::BlockStatus::Unknown) => {}, + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + let pre_digest = find_pre_digest::(&block.header) .expect("valid headers contain a pre-digest; header has been already verified; qed"); let slot = pre_digest.slot; @@ -126,7 +284,6 @@ where sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), ) })?; - let parent_slot = find_pre_digest::(&parent_header) .map(|d| d.slot) .expect("parent is non-genesis; valid headers contain a pre-digest; header has been already verified; qed"); @@ -138,202 +295,78 @@ where )) } - // If there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; - - // Use an extra scope to make the compiler happy, because otherwise he complains about the - // mutex, even if we dropped it... - let mut epoch_changes = { - let mut epoch_changes = self.epoch_changes.shared_data_locked(); - - // Check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ClientImport( - sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)) - .into(), - ) - })? 
- }; - - let intermediate = - block.remove_intermediate::>(INTERMEDIATE_KEY)?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - - let total_weight = parent_weight + pre_digest.ticket_aux.is_some() as u32; - - // Search for this all the time so we can reject unexpected announcements. - let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some()) { - (true, false) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), - )), - (false, true) => - return Err(ConsensusError::ClientImport( - sassafras_err(Error::::UnexpectedEpochChange).into(), - )), - _ => (), - } + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + + let first_in_epoch = parent_slot < viable_epoch_desc.start_slot(); + + let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some((*epoch_changes).clone()); - - let mut viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.genesis_config, slot) - }) - .ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })? 
- .into_cloned(); - - if viable_epoch.as_ref().end_slot() <= slot { - // Some epochs must have been skipped as our current slot fits outside the - // current epoch. We will figure out which is the first skipped epoch and we - // will partially re-use its data for this "recovery" epoch. - let epoch_data = viable_epoch.as_mut(); - let skipped_epochs = - (*slot - *epoch_data.start_slot) / epoch_data.config.epoch_duration; - let original_epoch_idx = epoch_data.epoch_idx; - - // NOTE: notice that we are only updating a local copy of the `Epoch`, this - // makes it so that when we insert the next epoch into `EpochChanges` below - // (after incrementing it), it will use the correct epoch index and start slot. - // We do not update the original epoch that may be reused because there may be - // some other forks where the epoch isn't skipped. - // Not updating the original epoch works because when we search the tree for - // which epoch to use for a given slot, we will search in-depth with the - // predicate `epoch.start_slot <= slot` which will still match correctly without - // requiring to update `start_slot` to the correct value. 
- epoch_data.epoch_idx += skipped_epochs; - epoch_data.start_slot = Slot::from( - *epoch_data.start_slot + skipped_epochs * epoch_data.config.epoch_duration, - ); - log::warn!( - target: "sassafras", - "🌳 Epoch(s) skipped from {} to {}", - original_epoch_idx, epoch_data.epoch_idx - ); - } - - // Restrict info logging during initial sync to avoid spam - let log_level = match block.origin { - BlockOrigin::NetworkInitialSync => log::Level::Debug, - _ => log::Level::Info, - }; - - log!(target: "sassafras", - log_level, - "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_idx, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); - - let next_epoch = viable_epoch.increment(next_epoch_descriptor); - - log!(target: "sassafras", - log_level, - "🌳 🍁 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); - - // Prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. 
- let prune_and_import = || { - prune_finalized(self.client.clone(), &mut epoch_changes)?; - - epoch_changes - .import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ) - .map_err(|e| { - ConsensusError::ClientImport(format!( - "Error importing epoch changes: {}", - e - )) - })?; - - Ok(()) - }; - - if let Err(e) = prune_and_import() { - debug!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); - *epoch_changes = - old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e) - } - - aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { - block - .auxiliary - .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - } + // Compute the total weight of the chain, including the imported block. - aux_schema::write_block_weight(hash, total_weight, |values| { - block - .auxiliary - .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - }); - - // The fork choice rule is that we pick the heaviest chain (i.e. more blocks built - // using primary mechanism), if there's a tie we go with the longest chain. - block.fork_choice = { - let info = self.client.info(); - let best_weight = if &info.best_hash == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, &info.best_hash) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| { - ConsensusError::ChainLookup( - "No block weight for parent header.".to_string(), - ) - })? 
- }; - - let is_new_best = total_weight > best_weight || - (total_weight == best_weight && number > info.best_number); - Some(ForkChoiceStrategy::Custom(is_new_best)) - }; - // Release the mutex, but it stays locked - epoch_changes.release_mutex() - }; + let parent_weight = aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .or_else(|| (*parent_header.number() == Zero::zero()).then(|| 0)) + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)).into(), + ) + })?; + + let total_weight = parent_weight + pre_digest.ticket_aux.is_some() as u32; + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // If there's a pending epoch we'll try to update all the involved data while + // saving the previous epoch changes as well. In this way we can revert it if + // there's any error. + let epoch_changes_data = next_epoch_digest + .map(|next_epoch_desc| { + self.import_epoch( + viable_epoch_desc, + next_epoch_desc, + slot, + number, + hash, + parent_hash, + block.origin != BlockOrigin::NetworkInitialSync, + &mut block.auxiliary, + ) + }) + .transpose()?; + + // The fork choice rule is intentionally changed within the context of the + // epoch changes lock to avoid annoying race conditions on what is the current + // best block. That is, the best may be changed by the inner block import. + let is_new_best = self.is_new_best(total_weight, number, parent_hash)?; + block.fork_choice = Some(ForkChoiceStrategy::Custom(is_new_best)); let import_result = self.inner.import_block(block, new_cache).await; // Revert to the original epoch changes in case there's an error // importing the block + // TODO-SASS-P3: shouldn't we check for Ok(Imported(_))? 
if import_result.is_err() { - if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes.upgrade() = old_epoch_changes; + if let Some(data) = epoch_changes_data { + data.rollback(); } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index cb1e48a796952..fda4226008e88 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -252,8 +252,8 @@ pub mod pallet { let pre_digest = >::digest() .logs .iter() - .filter_map(|s| { - s.as_pre_runtime().and_then(|(id, mut data)| { + .filter_map(|digest| { + digest.as_pre_runtime().and_then(|(id, mut data)| { if id == SASSAFRAS_ENGINE_ID { PreDigest::decode(&mut data).ok() } else { @@ -289,6 +289,9 @@ pub mod pallet { // already occurred at this point, so the let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); + + // TODO-SASS-P3: apparently this is not 100% ok + // `vrf_output` should be processed using `attach_input_hash(&pubkey, transcript)` Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); // If we are in the second half of the epoch, we can start sorting the next epoch From a02143c1f8c419b6759c7f7b000f1c0281e916d1 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 15 Nov 2022 19:33:00 +0100 Subject: [PATCH 13/62] Fixes after master merge --- Cargo.lock | 165 ++++++++++++++++++++++ bin/node-sassafras/node/Cargo.toml | 4 +- bin/node-sassafras/runtime/Cargo.toml | 6 +- client/consensus/sassafras/Cargo.toml | 8 +- client/consensus/sassafras/src/tests.rs | 11 +- frame/sassafras/Cargo.toml | 12 +- primitives/consensus/sassafras/Cargo.toml | 10 +- 7 files changed, 190 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 962d5f3adbe32..bda70fec67925 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2872,6 +2872,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = 
"hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hmac" version = "0.8.1" @@ -4691,6 +4697,83 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.1.0" +dependencies = [ + "clap 4.0.11", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-sassafras", + "sc-executor", + "sc-finality-grandpa", + "sc-keystore", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -6004,6 +6087,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = 
"pallet-sassafras" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -7938,6 +8042,44 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.1.0" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "schnorrkel", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -9494,6 +9636,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.1.0" +dependencies = [ + "async-trait", + "merlin", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -10394,6 +10557,7 @@ dependencies = [ "log", "memory-db", "pallet-babe", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "parity-util-mem", @@ -10408,6 +10572,7 @@ dependencies = [ "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", + "sp-consensus-sassafras", "sp-core", 
"sp-externalities", "sp-finality-grandpa", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 46e9216e9f1aa..dd0d1bc938e89 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -19,7 +19,7 @@ name = "node-sassafras" clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sp-core = { version = "7.0.0", path = "../../../primitives/core" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor", features = ["wasmtime"] } sc-service = { version = "0.10.0-dev", path = "../../../client/service", features = ["wasmtime"] } sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } @@ -33,7 +33,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 14268608af6ea..b0e11bd8a6b0d 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -29,12 +29,12 @@ frame-executive = { version = "4.0.0-dev", default-features = false, path = "../ sp-api = { version = "4.0.0-dev", default-features = false, path = 
"../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../../primitives/consensus/sassafras" } -sp-core = { version = "6.0.0", default-features = false, path = "../../../primitives/core" } +sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index a6c6bb59984f1..ec8ef0fa4136a 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -29,17 +29,17 @@ sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-application-crypto = { version = "6.0.0", path = "../../../primitives/application-crypto" } 
+sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-sassafras = { version = "0.1.0", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 5d1915473eb9f..e3f9848771dc9 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -678,8 +678,8 @@ fn finalization_prunes_epoch_changes_and_removes_weights() { // *-----E(#7)---#11 < fork #1 // Finalize block #10 so that on next epoch change the tree is pruned - env.client.finalize_block(&canon[13], None, true).unwrap(); - let canon_cont = env.propose_and_import_blocks(BlockId::Hash(*canon.last().unwrap()), 4); + env.client.finalize_block(canon[13], None, true).unwrap(); + let canon_tail = env.propose_and_import_blocks(BlockId::Hash(*canon.last().unwrap()), 4); // Post-finalize scenario. 
// @@ -688,10 +688,9 @@ fn finalization_prunes_epoch_changes_and_removes_weights() { let epoch_changes = epoch_changes.shared_data(); let epoch_changes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| *h).collect(); - // TODO-SASS-P2: this is fixed by a pending PR on substrate - //assert_eq!(epoch_changes, vec![canon[6], canon[12], canon[18], canon_cont[3]]); + assert_eq!(epoch_changes, vec![canon[6], canon[12], canon[18], canon_tail[3]]); - // TODO-SASS-P2 + // TODO-SASS-P3 //todo!("Requires aux_storage_cleanup"); } @@ -777,7 +776,7 @@ fn revert_not_allowed_for_finalized() { let canon = env.propose_and_import_blocks(BlockId::Number(0), 3); // Finalize best block - env.client.finalize_block(&canon[2], None, false).unwrap(); + env.client.finalize_block(canon[2], None, false).unwrap(); // Revert canon chain to last finalized block crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 7955345f8daef..867898b3811af 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -21,16 +21,16 @@ log = { version = "0.4.17", default-features = false } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" 
} -sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] hex-literal = "0.3.4" -sp-core = { version = "6.0.0", path = "../../primitives/core" } -sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "7.0.0", path = "../../primitives/core" } +sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } [features] default = ["std"] diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 08d089c4b7682..9e8356050ad18 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -20,14 +20,14 @@ merlin = { version = "2.0", default-features = false } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../vrf" } -sp-core = { version = "6.0.0", default-features = false, path = "../../core" } +sp-core = { version = "7.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.12.0", default-features = 
false, optional = true, path = "../../keystore" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../std" } +sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } [features] From 924c51a2f8b14a462087850549dba7055de78c14 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 15 Nov 2022 19:44:00 +0100 Subject: [PATCH 14/62] Bump Sassafras crates versions to 0.3.0 --- Cargo.lock | 10 +++++----- bin/node-sassafras/node/Cargo.toml | 8 ++++---- bin/node-sassafras/runtime/Cargo.toml | 10 +++++----- client/consensus/sassafras/Cargo.toml | 4 ++-- frame/sassafras/Cargo.toml | 4 ++-- primitives/consensus/sassafras/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 4 ++-- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bda70fec67925..f44268f0d9e19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4699,7 +4699,7 @@ dependencies = [ [[package]] name = "node-sassafras" -version = "0.1.0" +version = "0.3.0" dependencies = [ "clap 4.0.11", "frame-benchmarking", @@ -4741,7 +4741,7 @@ dependencies = [ [[package]] name = "node-sassafras-runtime" -version = "0.1.0" +version = "0.3.0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -6089,7 +6089,7 @@ dependencies = [ [[package]] name = "pallet-sassafras" -version = "0.1.0" +version = "0.3.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -8044,7 +8044,7 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "0.1.0" +version = "0.3.0" dependencies = [ "async-trait", "fork-tree", @@ -9638,7 +9638,7 @@ dependencies = [ [[package]] name = 
"sp-consensus-sassafras" -version = "0.1.0" +version = "0.3.0" dependencies = [ "async-trait", "merlin", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index dd0d1bc938e89..58a276b416a62 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras" -version = "0.1.0" +version = "0.3.0" authors = ["Parity Technologies "] description = "Node testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -26,8 +26,8 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } -sc-consensus-sassafras = { version = "0.1.0", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.1.0", path = "../../../primitives/consensus/sassafras" } +sc-consensus-sassafras = { version = "0.3.0", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.0", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } @@ -56,7 +56,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarkin frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } # Local Dependencies -node-sassafras-runtime = { version = "0.1.0", path = "../runtime" } +node-sassafras-runtime = { version = "0.3.0", path = "../runtime" } # CLI-specific dependencies try-runtime-cli = { version = "0.10.0-dev", optional = true, path = 
"../../../utils/frame/try-runtime/cli" } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index b0e11bd8a6b0d..6aa670d7b36bc 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras-runtime" -version = "0.1.0" +version = "0.3.0" authors = ["Parity Technologies "] description = "Runtime testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../../frame/sassafras" } +pallet-sassafras = { version = "0.3.0", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } @@ -28,7 +28,7 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} -sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "7.0.0", default-features = false, path = 
"../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } @@ -58,7 +58,7 @@ std = [ "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", - "pallet-sassafras/std", + "pallet-sassafras/std", "pallet-balances/std", "pallet-grandpa/std", "pallet-sudo/std", @@ -85,7 +85,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", - "pallet-sassafras/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index ec8ef0fa4136a..df996bf9bea94 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-sassafras" -version = "0.1.0" +version = "0.3.0" authors = ["Parity Technologies "] description = "Sassafras consensus algorithm for substrate" edition = "2021" @@ -33,7 +33,7 @@ sp-application-crypto = { version = "7.0.0", path = "../../../primitives/applica sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { version = "0.1.0", path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.0", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } sp-core = { version = "7.0.0", path = "../../../primitives/core" 
} diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 867898b3811af..0ed0c5c6654c0 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sassafras" -version = "0.1.0" +version = "0.3.0" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -22,7 +22,7 @@ pallet-session = { version = "4.0.0-dev", default-features = false, path = "../s pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../primitives/consensus/sassafras" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 9e8356050ad18..fd9aa2fcdd3ce 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "0.1.0" +version = "0.3.0" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2021" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index f66d2293ed0c7..0d820fe45166f 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,7 +18,7 @@ beefy-merkle-tree = { version = "4.0.0-dev", default-features = false, path = ". 
sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-sassafras = { version = "0.1.0", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -36,7 +36,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } -pallet-sassafras = { version = "0.1.0", default-features = false, path = "../../frame/sassafras" } +pallet-sassafras = { version = "0.3.0", default-features = false, path = "../../frame/sassafras" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } From 5b21f45313b005e79ccdf6f83435af94af80731b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 24 Jan 2023 13:04:52 +0100 Subject: [PATCH 15/62] Sassafras Iteration 3.1 (#12713) * Better 
tickets structure * Warp sync support * Misc fixes (details on the go) --- Cargo.lock | 10 +- bin/node-sassafras/node/Cargo.toml | 8 +- bin/node-sassafras/runtime/Cargo.toml | 6 +- bin/node-sassafras/runtime/src/lib.rs | 18 ++- client/consensus/sassafras/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 14 ++- .../consensus/sassafras/src/block_import.rs | 88 +++++++++++++++ client/consensus/sassafras/src/lib.rs | 21 +++- .../consensus/sassafras/src/verification.rs | 104 +++++++++--------- frame/sassafras/Cargo.toml | 4 +- frame/sassafras/src/lib.rs | 90 ++++++++++----- primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/consensus/sassafras/src/lib.rs | 55 ++++++++- test-utils/runtime/Cargo.toml | 4 +- test-utils/runtime/src/lib.rs | 60 ++++++---- 15 files changed, 346 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f44268f0d9e19..871bae578ef89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4699,7 +4699,7 @@ dependencies = [ [[package]] name = "node-sassafras" -version = "0.3.0" +version = "0.3.1-dev" dependencies = [ "clap 4.0.11", "frame-benchmarking", @@ -4741,7 +4741,7 @@ dependencies = [ [[package]] name = "node-sassafras-runtime" -version = "0.3.0" +version = "0.3.1-dev" dependencies = [ "frame-benchmarking", "frame-executive", @@ -6089,7 +6089,7 @@ dependencies = [ [[package]] name = "pallet-sassafras" -version = "0.3.0" +version = "0.3.1-dev" dependencies = [ "frame-benchmarking", "frame-support", @@ -8044,7 +8044,7 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "0.3.0" +version = "0.3.1-dev" dependencies = [ "async-trait", "fork-tree", @@ -9638,7 +9638,7 @@ dependencies = [ [[package]] name = "sp-consensus-sassafras" -version = "0.3.0" +version = "0.3.1-dev" dependencies = [ "async-trait", "merlin", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 58a276b416a62..48453f20c1446 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ 
b/bin/node-sassafras/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras" -version = "0.3.0" +version = "0.3.1-dev" authors = ["Parity Technologies "] description = "Node testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -26,8 +26,8 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } -sc-consensus-sassafras = { version = "0.3.0", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.3.0", path = "../../../primitives/consensus/sassafras" } +sc-consensus-sassafras = { version = "0.3.1-dev", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } @@ -56,7 +56,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarkin frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } # Local Dependencies -node-sassafras-runtime = { version = "0.3.0", path = "../runtime" } +node-sassafras-runtime = { version = "0.3.1-dev", path = "../runtime" } # CLI-specific dependencies try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 6aa670d7b36bc..8fe0108ae4de0 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "node-sassafras-runtime" -version = "0.3.0" +version = "0.3.1-dev" authors = ["Parity Technologies "] description = "Runtime testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -pallet-sassafras = { version = "0.3.0", default-features = false, path = "../../../frame/sassafras" } +pallet-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } @@ -28,7 +28,7 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} -sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } diff --git 
a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 4db5e3aa3dd2a..95b38ffa718c2 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -379,16 +379,6 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { - sp_consensus_sassafras::SassafrasConfiguration { - slot_duration: SLOT_DURATION_IN_MILLISECONDS, - epoch_duration: EPOCH_DURATION_IN_SLOTS, - authorities: Sassafras::authorities().to_vec(), - randomness: Sassafras::randomness(), - threshold_params: Sassafras::config(), - } - } - fn submit_tickets_unsigned_extrinsic( tickets: Vec ) -> bool { @@ -399,6 +389,14 @@ impl_runtime_apis! { Sassafras::slot_ticket(slot) } + fn current_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::current_epoch() + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::next_epoch() + } + fn generate_key_ownership_proof( _slot: sp_consensus_sassafras::Slot, _authority_id: sp_consensus_sassafras::AuthorityId, diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index df996bf9bea94..d6dec3a830b46 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-sassafras" -version = "0.3.0" +version = "0.3.1-dev" authors = ["Parity Technologies "] description = "Sassafras consensus algorithm for substrate" edition = "2021" @@ -33,7 +33,7 @@ sp-application-crypto = { version = "7.0.0", path = "../../../primitives/applica sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { version = "0.3.0", path = "../../../primitives/consensus/sassafras" } 
+sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 9e620194ac09d..563a0fdf89fb7 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -55,9 +55,9 @@ pub(crate) fn claim_slot( let (authority_idx, ticket_aux) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); - let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket)?.clone(); + let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket.output)?.clone(); log::debug!(target: "sassafras", "🌳 Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", - &ticket.as_bytes()[0..8], authority_idx, ticket_aux.attempt); + &ticket.output.as_bytes()[0..8], authority_idx, ticket_aux.attempt); (authority_idx, Some(ticket_aux)) }, None => { @@ -128,7 +128,11 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> V ) .ok()??; - let ticket = VRFOutput(signature.output); + let ticket = Ticket { + output: VRFOutput(signature.output), + // TODO-SASS-P3 + proof: VRFProof::try_from([0; 64]).expect("FIXME"), + }; if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { return None } @@ -141,8 +145,10 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> V for attempt in 0..max_attempts { if let Some((ticket, ticket_aux)) = make_ticket(attempt) { + epoch + .tickets_aux + .insert(ticket.output, (authority_idx as AuthorityIndex, ticket_aux)); tickets.push(ticket); - epoch.tickets_aux.insert(ticket, (authority_idx as AuthorityIndex, ticket_aux)); } } } diff --git 
a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 81563256ef30e..bf6e37a825b76 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -20,6 +20,7 @@ use super::*; use sc_client_api::{AuxDataOperations, FinalityNotification, PreCommitActions}; +use sp_blockchain::BlockStatus; /// Block-import handler for Sassafras. /// @@ -230,6 +231,77 @@ where } } +impl SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + /// Import whole state after a warp sync. + /// + /// This function makes multiple transactions to the DB. If one of them fails we may + /// end up in an inconsistent state and have to resync + async fn import_state( + &mut self, + mut block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let parent_hash = *block.header.parent_hash(); + let number = *block.header.number(); + + // Check for the unit tag. 
+ block.remove_intermediate::<()>(INTERMEDIATE_KEY)?; + + // Import as best + block.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + // Reset block weight + aux_schema::write_block_weight(hash, 0, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // First make the client import the state + let aux = match self.inner.import_block(block, new_cache).await { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => + return Err(ConsensusError::ClientImport(format!( + "Unexpected import result: {:?}", + r + ))), + Err(e) => return Err(e.into()), + }; + + // Read epoch info from the imported state + let block_id = BlockId::Hash(hash); + let curr_epoch = self.client.runtime_api().current_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) + })?; + let next_epoch = self.client.runtime_api().next_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) + })?; + + let mut epoch_changes = self.epoch_changes.shared_data(); + epoch_changes.reset(parent_hash, hash, number, curr_epoch.into(), next_epoch.into()); + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + self.client.insert_aux(insert, []) + }) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(ImportResult::Imported(aux)) + } +} + #[async_trait::async_trait] impl BlockImport for SassafrasBlockImport where @@ -255,6 +327,22 @@ where let hash = block.post_hash(); let number = *block.header.number(); + // Early exit if block already in chain, otherwise the check for epoch changes + // will error when trying to re-import + match self.client.status(BlockId::Hash(hash)) { + Ok(BlockStatus::InChain) => { + block.remove_intermediate::>(INTERMEDIATE_KEY)?; + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) + }, + Ok(BlockStatus::Unknown) 
=> {}, + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + + if block.with_state() { + return self.import_state(block, new_cache).await + } + let viable_epoch_desc = block .remove_intermediate::>(INTERMEDIATE_KEY)? .epoch_descriptor; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index f5134e38266f7..be24e2b77f3a3 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -184,17 +184,28 @@ fn sassafras_err(error: Error) -> Error { error } -/// Sassafras epoch information +/// Sassafras epoch information augmented with private tickets information. #[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] pub struct Epoch { /// The epoch index. pub epoch_idx: u64, /// The starting slot of the epoch. pub start_slot: Slot, - /// Epoch configuration + /// Epoch configuration. pub config: SassafrasConfiguration, /// Tickets auxiliary data. - pub tickets_aux: BTreeMap, + pub tickets_aux: BTreeMap, +} + +impl From for Epoch { + fn from(epoch: sp_consensus_sassafras::Epoch) -> Self { + Epoch { + epoch_idx: epoch.epoch_idx, + start_slot: epoch.start_slot, + config: epoch.config, + tickets_aux: BTreeMap::new(), + } + } } impl EpochT for Epoch { @@ -252,8 +263,8 @@ where info.genesis_hash }); - let config = client.runtime_api().configuration(&BlockId::Hash(hash))?; - Ok(config) + let epoch = client.runtime_api().current_epoch(&BlockId::Hash(hash))?; + Ok(epoch.config) } /// Intermediate value passed to block importer from authoring or validation logic. diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index a7f6707565d0b..74ec2e865ee82 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -23,24 +23,28 @@ use super::*; // Allowed slot drift. 
const MAX_SLOT_DRIFT: u64 = 1; -/// Sassafras verification parameters -pub struct VerificationParams<'a, B: 'a + BlockT> { +/// Verification parameters +struct VerificationParams<'a, B: 'a + BlockT> { /// The header being verified. - pub header: B::Header, + header: B::Header, /// The pre-digest of the header being verified. - pub pre_digest: PreDigest, + pre_digest: &'a PreDigest, /// The slot number of the current time. - pub slot_now: Slot, + slot_now: Slot, /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. - pub epoch: &'a Epoch, + epoch: &'a Epoch, + /// Origin + origin: BlockOrigin, /// Expected ticket for this block. - pub ticket: Option, + ticket: Option, } -pub struct VerifiedHeaderInfo { - pub authority_id: AuthorityId, - pub pre_digest: DigestItem, - pub seal: DigestItem, +/// Verified information +struct VerifiedHeaderInfo { + /// Authority index. + authority_id: AuthorityId, + /// Seal found within the header. + seal: DigestItem, } /// Check a header has been signed by the right key. If the slot is too far in @@ -52,10 +56,10 @@ pub struct VerifiedHeaderInfo { /// /// The given header can either be from a primary or secondary slot assignment, /// with each having different validation logic. -pub fn check_header( +fn check_header( params: VerificationParams, ) -> Result, Error> { - let VerificationParams { mut header, pre_digest, slot_now, epoch, ticket } = params; + let VerificationParams { mut header, pre_digest, slot_now, epoch, origin, ticket } = params; let config = &epoch.config; // Check that the slot is not in the future, with some drift being allowed. 
@@ -92,7 +96,7 @@ pub fn check_header( let transcript = make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_idx); schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_aux.proof)) + .and_then(|p| p.vrf_verify(transcript, &ticket.output, &ticket_aux.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { @@ -107,10 +111,11 @@ pub fn check_header( log::warn!(target: "sassafras", "🌳 Unexpected secondary authoring mechanism"); return Err(Error::UnexpectedAuthoringMechanism) }, - (None, Some(_)) => { - log::warn!(target: "sassafras", "🌳 Unexpected primary authoring mechanism"); - return Err(Error::UnexpectedAuthoringMechanism) - }, + (None, Some(_)) => + if origin != BlockOrigin::NetworkInitialSync { + log::warn!(target: "sassafras", "🌳 Unexpected primary authoring mechanism"); + return Err(Error::UnexpectedAuthoringMechanism) + }, } // Check slot-vrf proof @@ -120,11 +125,7 @@ pub fn check_header( .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; - let info = VerifiedHeaderInfo { - authority_id, - pre_digest: CompatibleDigestItem::sassafras_pre_digest(pre_digest), - seal, - }; + let info = VerifiedHeaderInfo { authority_id, seal }; Ok(CheckedHeader::Checked(header, info)) } @@ -317,11 +318,12 @@ where // read it from the state after import. We also skip all verifications // because there's no parent state and we trust the sync module to verify // that the state is correct and finalized. + // Just insert a tag to notify that this is indeed a Sassafras block to the + // `BlockImport` implementation. 
+ block.insert_intermediate(INTERMEDIATE_KEY, ()); return Ok((block, Default::default())) } - trace!(target: "sassafras", "🌳 We have {:?} logs in this header", block.header.digest().logs().len()); - let hash = block.header.hash(); let parent_hash = *block.header.parent_hash(); @@ -340,7 +342,7 @@ where let pre_digest = find_pre_digest::(&block.header)?; - let (check_header, epoch_descriptor) = { + let (checked_header, epoch_descriptor) = { let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( @@ -359,34 +361,31 @@ where .client .runtime_api() .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) - .map_err(|err| err.to_string())?; + .ok() + .unwrap_or_else(|| None); - let v_params = VerificationParams { + let verification_params = VerificationParams { header: block.header.clone(), - pre_digest, + pre_digest: &pre_digest, slot_now, epoch: viable_epoch.as_ref(), + origin: block.origin, ticket, }; + let checked_header = check_header::(verification_params)?; - (check_header::(v_params)?, epoch_descriptor) + (checked_header, epoch_descriptor) }; - match check_header { + match checked_header { CheckedHeader::Checked(pre_header, verified_info) => { - let sassafras_pre_digest = verified_info - .pre_digest - .as_sassafras_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - let slot = sassafras_pre_digest.slot; - // The header is valid but let's check if there was something else already // proposed at the same slot by the given author. If there was, we will // report the equivocation to the runtime. if let Err(err) = self .check_and_report_equivocation( slot_now, - slot, + pre_digest.slot, &block.header, &verified_info.authority_id, &block.origin, @@ -400,20 +399,23 @@ where // internally-set timestamp in the inherents actually matches the slot set in the // seal. 
if let Some(inner_body) = block.body { - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(slot); let new_block = Block::new(pre_header.clone(), inner_body); - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; + if !block.state_action.skip_execution_checks() { + // TODO-SASS-P3 :??? DOC + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(pre_digest.slot); + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + } let (_, inner_body) = new_block.deconstruct(); block.body = Some(inner_body); @@ -428,12 +430,12 @@ where ); block.header = pre_header; + block.post_hash = Some(hash); block.post_digests.push(verified_info.seal); block.insert_intermediate( INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }, ); - block.post_hash = Some(hash); Ok((block, Default::default())) }, diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 0ed0c5c6654c0..22aee8632c968 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sassafras" -version = "0.3.0" +version = "0.3.1-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -22,7 +22,7 @@ pallet-session = { version = "4.0.0-dev", default-features = false, path = "../s pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } 
-sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index fda4226008e88..d465d61ddceaa 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -54,8 +54,9 @@ use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, EquivocationProof, Randomness, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, Slot, Ticket, SASSAFRAS_ENGINE_ID, + AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, + SassafrasConfiguration, SassafrasEpochConfiguration, Slot, Ticket, VRFOutput, + SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -151,6 +152,7 @@ pub mod pallet { /// Next epoch authorities. #[pallet::storage] + #[pallet::getter(fn next_authorities)] pub type NextAuthorities = StorageValue< _, WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, @@ -175,6 +177,7 @@ pub mod pallet { /// Next epoch randomness. #[pallet::storage] + #[pallet::getter(fn next_randomness)] pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; /// Randomness accumulator. @@ -194,6 +197,7 @@ pub mod pallet { /// The configuration for the next epoch. 
#[pallet::storage] + #[pallet::getter(fn next_config)] pub type NextEpochConfig = StorageValue<_, SassafrasEpochConfiguration>; /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next @@ -302,12 +306,12 @@ pub mod pallet { let mut metadata = TicketsMeta::::get(); if metadata.segments_count != 0 { let epoch_idx = EpochIndex::::get() + 1; - let epoch_key = (epoch_idx & 1) as u8; + let epoch_tag = (epoch_idx & 1) as u8; if metadata.segments_count != 0 { let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); Self::sort_tickets( u32::max(1, metadata.segments_count / slots_left as u32), - epoch_key, + epoch_tag, &mut metadata, ); TicketsMeta::::set(metadata); @@ -560,15 +564,15 @@ impl Pallet { }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - let epoch_key = (epoch_idx & 1) as u8; + let epoch_tag = (epoch_idx & 1) as u8; let mut tickets_metadata = TicketsMeta::::get(); // Optionally finish sorting if tickets_metadata.segments_count != 0 { - Self::sort_tickets(tickets_metadata.segments_count, epoch_key, &mut tickets_metadata); + Self::sort_tickets(tickets_metadata.segments_count, epoch_tag, &mut tickets_metadata); } // Clear the prev (equal to the next) epoch tickets counter. - let next_epoch_key = epoch_key ^ 1; - tickets_metadata.tickets_count[next_epoch_key as usize] = 0; + let next_epoch_tag = epoch_tag ^ 1; + tickets_metadata.tickets_count[next_epoch_tag as usize] = 0; TicketsMeta::::set(tickets_metadata); } @@ -651,6 +655,36 @@ impl Pallet { Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } + /// Current epoch configuration. 
+ pub fn current_epoch() -> Epoch { + let config = SassafrasConfiguration { + slot_duration: T::SlotDuration::get(), + epoch_duration: T::EpochDuration::get(), + authorities: Self::authorities().to_vec(), + randomness: Self::randomness(), + threshold_params: Self::config(), + }; + let epoch_idx = EpochIndex::::get(); + let start_slot = Self::current_epoch_start(); + Epoch { epoch_idx, start_slot, config } + } + + /// Current epoch configuration. + pub fn next_epoch() -> Epoch { + let config = SassafrasConfiguration { + slot_duration: T::SlotDuration::get(), + epoch_duration: T::EpochDuration::get(), + authorities: Self::next_authorities().to_vec(), + randomness: Self::next_randomness(), + threshold_params: Self::next_config().unwrap_or_else(|| Self::config()), + }; + let epoch_idx = EpochIndex::::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + let start_slot = Self::epoch_start(epoch_idx); + Epoch { epoch_idx, start_slot, config } + } + /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. /// /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, @@ -689,15 +723,15 @@ impl Pallet { ticket_idx as u32 }; - let mut epoch_key = (epoch_idx & 1) as u8; + let mut epoch_tag = (epoch_idx & 1) as u8; if duration <= slot_idx && slot_idx < 2 * duration { // Try to get a ticket for the next epoch. Since its state values were not enacted yet, // we may have to finish sorting the tickets. 
- epoch_key ^= 1; + epoch_tag ^= 1; slot_idx -= duration; if tickets_meta.segments_count != 0 { - Self::sort_tickets(tickets_meta.segments_count, epoch_key, &mut tickets_meta); + Self::sort_tickets(tickets_meta.segments_count, epoch_tag, &mut tickets_meta); TicketsMeta::::set(tickets_meta.clone()); } } else if slot_idx >= 2 * duration { @@ -705,42 +739,47 @@ impl Pallet { } let ticket_idx = get_ticket_idx(slot_idx); - if ticket_idx < tickets_meta.tickets_count[epoch_key as usize] { - Tickets::::get((epoch_key, ticket_idx)) + if ticket_idx < tickets_meta.tickets_count[epoch_tag as usize] { + Tickets::::get((epoch_tag, ticket_idx)) } else { None } } // Lexicographically sort the tickets who belongs to the next epoch. - // The tickets are fetched from at most `max_iter` segments received via the `submit_tickets` - // extrinsic. The resulting sorted vector is truncated and if all the segments where sorted - // it is saved to be as the next epoch tickets. - // Else the result is saved to be used by next calls. - fn sort_tickets(max_iter: u32, epoch_key: u8, metadata: &mut TicketsMetadata) { + // + // Tickets are fetched from at most `max_iter` segments received via the `submit_tickets` + // extrinsic. + // + // The resulting sorted vector is optionally truncated to contain at most `MaxTickets` + // entries. If all the segments were consumed then the sorted vector is saved as the + // next epoch tickets, else it is saved to be used by next calls to this function. + fn sort_tickets(max_iter: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { let mut segments_count = metadata.segments_count; let max_iter = max_iter.min(segments_count); let max_tickets = T::MaxTickets::get() as usize; + // Fetch the partial result. 
let mut new_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); let mut require_sort = max_iter != 0; let mut sup = if new_segment.len() >= max_tickets { - new_segment[new_segment.len() - 1] + new_segment[new_segment.len() - 1].output } else { - Ticket::try_from([0xFF; 32]).expect("This is a valid ticket value; qed") + VRFOutput::try_from([0xFF; 32]).expect("This is a valid vrf output value; qed") }; + // Consume at most `max_iter` segments. for _ in 0..max_iter { let segment = NextTicketsSegments::::take(segments_count); - segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); + segment.into_iter().filter(|t| t.output < sup).for_each(|t| new_segment.push(t)); if new_segment.len() > max_tickets { require_sort = false; new_segment.sort_unstable(); new_segment.truncate(max_tickets); - sup = new_segment[new_segment.len() - 1]; + sup = new_segment[max_tickets - 1].output; } segments_count -= 1; @@ -751,13 +790,14 @@ impl Pallet { } if segments_count == 0 { - // Sort is over, write to the map. + // Sort is over, write to next epoch map. // TODO-SASS-P3: is there a better way to write a map from a vector? new_segment.iter().enumerate().for_each(|(i, t)| { - Tickets::::insert((epoch_key, i as u32), t); + Tickets::::insert((epoch_tag, i as u32), t); }); - metadata.tickets_count[epoch_key as usize] = new_segment.len() as u32; + metadata.tickets_count[epoch_tag as usize] = new_segment.len() as u32; } else { + // Keep the partial result for next invocations. 
NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); } diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index fd9aa2fcdd3ce..7fdec9e22371d 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "0.3.0" +version = "0.3.1-dev" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2021" diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c4bd2daca8f97..d84e0c7ef5352 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -103,6 +103,17 @@ impl SassafrasConfiguration { } } +/// Sassafras epoch information +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_idx: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// Epoch configuration. + pub config: SassafrasConfiguration, +} + /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -114,14 +125,43 @@ pub struct SassafrasEpochConfiguration { } /// Ticket type. -pub type Ticket = VRFOutput; +// TODO-SASS-P3: we are currently using Shnorrkel structures as placeholders. +// Should switch to new RVRF primitive. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct Ticket { + /// Ring VRF output. + pub output: VRFOutput, + /// Ring VRF commitment proof. + pub proof: VRFProof, + // Ticket opaque utility data. + // TODO-SASS-P3: Interpretation of this data is up to the application? Investigate + // Suggested by Jeff: + // - ephemeral_pk: public key used to... 
+ // - revealed_pk: ??? + // - gossip_auth_id: identifier to reach this actor in a separate gossip network + //pub data: Vec, +} + +use core::cmp::Ordering; + +impl PartialOrd for Ticket { + fn partial_cmp(&self, other: &Self) -> Option { + self.output.partial_cmp(&other.output) + } +} + +impl Ord for Ticket { + fn cmp(&self, other: &Self) -> Ordering { + self.output.cmp(&other.output) + } +} /// Ticket auxiliary information. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketAux { /// Attempt number. pub attempt: u32, - /// Ticket proof. + /// Ticket revelation proof. pub proof: VRFProof, } @@ -144,7 +184,7 @@ pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: /// Returns true if the given VRF output is lower than the given threshold, false otherwise. pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { - U256::from(ticket.as_bytes()) < threshold + U256::from(ticket.output.as_bytes()) < threshold } /// An opaque type used to represent the key ownership proof at the runtime API boundary. @@ -172,9 +212,6 @@ impl OpaqueKeyOwnershipProof { sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { - /// Return the genesis configuration for Sassafras. The configuration is only read on genesis. - fn configuration() -> SassafrasConfiguration; - /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; @@ -182,6 +219,12 @@ sp_api::decl_runtime_apis! { /// Get expected ticket for the given slot. fn slot_ticket(slot: Slot) -> Option; + /// Current epoch information. + fn current_epoch() -> Epoch; + + /// Next epoch information. + fn next_epoch() -> Epoch; + /// Generates a proof of key ownership for the given authority in the current epoch. 
/// /// An example usage of this module is coupled with the session historical module to prove diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 0d820fe45166f..a492e3124fc0c 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,7 +18,7 @@ beefy-merkle-tree = { version = "4.0.0-dev", default-features = false, path = ". sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-sassafras = { version = "0.3.0", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -36,7 +36,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } -pallet-sassafras = { version = "0.3.0", default-features = false, path = "../../frame/sassafras" } +pallet-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../frame/sassafras" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } frame-system-rpc-runtime-api = { 
version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 7025f9fdcc079..df8a013ad3a20 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -969,24 +969,30 @@ cfg_if! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + >::submit_tickets_unsigned_extrinsic(tickets) + } + + fn current_epoch() -> sp_consensus_sassafras::Epoch { let authorities = system::authorities().into_iter().map(|x| { let authority: sr25519::Public = x.into(); (SassafrasId::from(authority), 1) }).collect(); - sp_consensus_sassafras::SassafrasConfiguration { - slot_duration: SlotDuration::get(), - epoch_duration: EpochDuration::get(), - authorities, - randomness: >::randomness(), - threshold_params: >::config(), - } + let mut epoch = >::current_epoch(); + epoch.config.authorities = authorities; + epoch } - fn submit_tickets_unsigned_extrinsic( - tickets: Vec - ) -> bool { - >::submit_tickets_unsigned_extrinsic(tickets) + fn next_epoch() -> sp_consensus_sassafras::Epoch { + let authorities = system::authorities().into_iter().map(|x| { + let authority: sr25519::Public = x.into(); + (SassafrasId::from(authority), 1) + }).collect(); + let mut epoch = >::next_epoch(); + epoch.config.authorities = authorities; + epoch } fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { @@ -1285,22 +1291,32 @@ cfg_if! 
{ } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { - sp_consensus_sassafras::SassafrasConfiguration { - slot_duration: SlotDuration::get(), - epoch_duration: EpochDuration::get(), - authorities: >::authorities().to_vec(), - randomness: >::randomness(), - threshold_params: >::config(), - } - } - fn submit_tickets_unsigned_extrinsic( tickets: Vec ) -> bool { >::submit_tickets_unsigned_extrinsic(tickets) } + fn current_epoch() -> sp_consensus_sassafras::Epoch { + let authorities = system::authorities().into_iter().map(|x| { + let authority: sr25519::Public = x.into(); + (SassafrasId::from(authority), 1) + }).collect(); + let mut epoch = >::current_epoch(); + epoch.config.authorities = authorities; + epoch + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + let authorities = system::authorities().into_iter().map(|x| { + let authority: sr25519::Public = x.into(); + (SassafrasId::from(authority), 1) + }).collect(); + let mut epoch = >::next_epoch(); + epoch.config.authorities = authorities; + epoch + } + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { >::slot_ticket(slot) } From 086630c879351edffe556d35aac22d1c4c73a5f4 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 24 Jan 2023 13:28:41 +0100 Subject: [PATCH 16/62] Introduce TicketEnvelope --- client/consensus/sassafras/src/authorship.rs | 27 ++++++----- client/consensus/sassafras/src/lib.rs | 3 +- .../consensus/sassafras/src/verification.rs | 2 +- frame/sassafras/src/lib.rs | 23 +++++---- frame/sassafras/src/mock.rs | 10 ++-- frame/sassafras/src/tests.rs | 27 +++-------- primitives/consensus/sassafras/src/lib.rs | 48 +++++++++---------- test-utils/runtime/src/lib.rs | 4 +- 8 files changed, 69 insertions(+), 75 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 563a0fdf89fb7..282866e607e5d 100644 --- 
a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -23,7 +23,7 @@ use super::*; use sp_consensus_sassafras::{ digests::PreDigest, vrf::{make_slot_transcript_data, make_ticket_transcript_data}, - AuthorityId, Slot, Ticket, TicketAux, + AuthorityId, Slot, Ticket, TicketAux, TicketEnvelope, }; use sp_core::{twox_64, ByteArray}; @@ -55,9 +55,9 @@ pub(crate) fn claim_slot( let (authority_idx, ticket_aux) = match ticket { Some(ticket) => { log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); - let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket.output)?.clone(); + let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket)?.clone(); log::debug!(target: "sassafras", "🌳 Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", - &ticket.output.as_bytes()[0..8], authority_idx, ticket_aux.attempt); + &ticket.as_bytes()[0..8], authority_idx, ticket_aux.attempt); (authority_idx, Some(ticket_aux)) }, None => { @@ -92,7 +92,7 @@ pub(crate) fn claim_slot( /// Generate the tickets for the given epoch. /// Tickets additional information will be stored within the `Epoch` structure. /// The additional information will be used later during session to claim slots. 
-fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { +fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; let redundancy_factor = config.threshold_params.redundancy_factor; @@ -128,27 +128,28 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> V ) .ok()??; - let ticket = Ticket { - output: VRFOutput(signature.output), - // TODO-SASS-P3 - proof: VRFProof::try_from([0; 64]).expect("FIXME"), - }; + let ticket = VRFOutput(signature.output); if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { return None } + let envelope = TicketEnvelope { + ticket, + // TODO-SASS-P3: placeholder... + zk_proof: VRFProof::try_from([0; 64]).expect("FIXME"), + }; let ticket_aux = TicketAux { attempt: attempt as u32, proof: VRFProof(signature.proof) }; - Some((ticket, ticket_aux)) + Some((envelope, ticket_aux)) }; for attempt in 0..max_attempts { - if let Some((ticket, ticket_aux)) = make_ticket(attempt) { + if let Some((envelope, ticket_aux)) = make_ticket(attempt) { epoch .tickets_aux - .insert(ticket.output, (authority_idx as AuthorityIndex, ticket_aux)); - tickets.push(ticket); + .insert(envelope.ticket, (authority_idx as AuthorityIndex, ticket_aux)); + tickets.push(envelope); } } } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index be24e2b77f3a3..88a2aa3f80ced 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -82,7 +82,8 @@ pub use sp_consensus_sassafras::{ vrf::{make_slot_transcript, make_ticket_transcript}, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, - TicketAux, VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, + TicketAux, TicketEnvelope, 
VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, + VRF_PROOF_LENGTH, }; mod authorship; diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 74ec2e865ee82..70c235ff7825e 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -96,7 +96,7 @@ fn check_header( let transcript = make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_idx); schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &ticket.output, &ticket_aux.proof)) + .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_aux.proof)) .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index d465d61ddceaa..54d5dec64fa33 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -55,7 +55,7 @@ use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, - SassafrasConfiguration, SassafrasEpochConfiguration, Slot, Ticket, VRFOutput, + SassafrasConfiguration, SassafrasEpochConfiguration, Slot, Ticket, TicketEnvelope, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; @@ -329,7 +329,7 @@ pub mod pallet { #[pallet::weight(10_000)] pub fn submit_tickets( origin: OriginFor, - tickets: BoundedVec, + tickets: BoundedVec, ) -> DispatchResult { ensure_none(origin)?; @@ -337,6 +337,10 @@ pub mod pallet { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); + // TODO-SASS-P4: for sure there is a better way to do this... 
+ let tickets: Vec<_> = tickets.iter().map(|t| t.ticket).collect(); + let tickets = BoundedVec::<_, T::MaxTickets>::try_from(tickets).expect("TODO"); + // We just require a unique key to save the partial tickets list. metadata.segments_count += 1; NextTicketsSegments::::insert(metadata.segments_count, tickets); @@ -450,8 +454,9 @@ pub mod pallet { ); if !tickets .iter() - .all(|ticket| sp_consensus_sassafras::check_threshold(ticket, threshold)) + .all(|t| sp_consensus_sassafras::check_threshold(&t.ticket, threshold)) { + // TODO-SASS-P3: also check ZK proof to assert origin validity return InvalidTransaction::Custom(0).into() } @@ -765,21 +770,21 @@ impl Pallet { let mut require_sort = max_iter != 0; let mut sup = if new_segment.len() >= max_tickets { - new_segment[new_segment.len() - 1].output + new_segment[new_segment.len() - 1] } else { - VRFOutput::try_from([0xFF; 32]).expect("This is a valid vrf output value; qed") + Ticket::try_from([0xFF; 32]).expect("This is a valid vrf output value; qed") }; // Consume at most `max_iter` segments. for _ in 0..max_iter { let segment = NextTicketsSegments::::take(segments_count); - segment.into_iter().filter(|t| t.output < sup).for_each(|t| new_segment.push(t)); + segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); if new_segment.len() > max_tickets { require_sort = false; new_segment.sort_unstable(); new_segment.truncate(max_tickets); - sup = new_segment[max_tickets - 1].output; + sup = new_segment[max_tickets - 1]; } segments_count -= 1; @@ -812,9 +817,9 @@ impl Pallet { /// second half are dropped. 
/// /// TODO-SASS-P3: we have to add the zk validity proofs - pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { + pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); - tickets.sort_unstable(); + tickets.sort_unstable_by_key(|t| t.ticket); let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index c7bd93c0b2175..d9c0e6edfc743 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -24,7 +24,7 @@ use scale_codec::Encode; use sp_consensus_sassafras::{ digests::PreDigest, vrf::{self, VRFOutput, VRFProof}, - AuthorityIndex, AuthorityPair, Slot, + AuthorityIndex, AuthorityPair, Slot, TicketEnvelope, }; use sp_core::{ crypto::{IsWrappedBy, Pair}, @@ -158,10 +158,14 @@ fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput /// Construct at most `attempts` tickets for the given `slot`. /// TODO-SASS-P3: filter out invalid tickets according to test threshold. -pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec<(VRFOutput, VRFProof)> { +/// E.g. by passing an optional threshold +pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec { (0..attempts) .into_iter() - .map(|attempt| make_ticket_vrf(slot, attempt, pair)) + .map(|attempt| { + let (ticket, zk_proof) = make_ticket_vrf(slot, attempt, pair); + TicketEnvelope { ticket, zk_proof } + }) .collect() } diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 9c5828ac50b62..0e2e11c0aee96 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -352,11 +352,8 @@ fn segments_incremental_sortition_works() { // Submit authoring tickets in three different batches. 
// We can ignore the threshold since we are not passing through the unsigned extrinsic // validation. - let mut tickets: Vec = - make_tickets(start_slot + 1, segments_num * max_tickets, pair) - .into_iter() - .map(|(output, _)| output) - .collect(); + let tickets: Vec = + make_tickets(start_slot + 1, segments_num * max_tickets, pair); let segment_len = tickets.len() / segments_num as usize; for i in 0..segments_num as usize { let segment = @@ -364,10 +361,6 @@ fn segments_incremental_sortition_works() { Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); } - tickets.sort(); - tickets.truncate(max_tickets as usize); - let _expected_tickets = tickets; - let epoch_duration: u64 = ::EpochDuration::get(); // Proceed to half of the epoch (sortition should not have been started yet) @@ -445,10 +438,7 @@ fn submit_enact_claim_tickets() { // Submit authoring tickets in three different batches. // We can ignore the threshold since we are not passing through the unsigned extrinsic // validation. 
- let mut tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]) - .into_iter() - .map(|(output, _)| output) - .collect(); + let tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]); let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), tickets0).unwrap(); let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); @@ -456,9 +446,9 @@ fn submit_enact_claim_tickets() { let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), tickets2).unwrap(); - tickets.sort(); - tickets.truncate(max_tickets as usize); - let expected_tickets = tickets; + let mut expected_tickets: Vec<_> = tickets.into_iter().map(|t| t.ticket).collect(); + expected_tickets.sort(); + expected_tickets.truncate(max_tickets as usize); // Check state after submit let meta = TicketsMeta::::get(); @@ -520,10 +510,7 @@ fn block_allowed_to_skip_epochs() { System::initialize(&start_block, &Default::default(), &digest); Sassafras::on_initialize(start_block); - let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]) - .into_iter() - .map(|(output, _)| output) - .collect(); + let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]); Sassafras::submit_tickets( RuntimeOrigin::none(), BoundedVec::truncate_from(tickets.clone()), diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index d84e0c7ef5352..4a09530eb60f3 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -124,15 +124,25 @@ pub struct SassafrasEpochConfiguration { pub attempts_number: u32, } -/// Ticket type. +/// Ticket value. +pub type Ticket = VRFOutput; + +/// Ticket proof. +pub type TicketProof = VRFProof; + +/// Ticket ZK commitment proof. +/// TODO-SASS-P3: this is a placeholder. +pub type TicketZkProof = VRFProof; + +/// Ticket envelope used on submission. 
// TODO-SASS-P3: we are currently using Shnorrkel structures as placeholders. -// Should switch to new RVRF primitive. +// Should switch to new RVRF primitive soon. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct Ticket { +pub struct TicketEnvelope { /// Ring VRF output. - pub output: VRFOutput, - /// Ring VRF commitment proof. - pub proof: VRFProof, + pub ticket: Ticket, + /// Ring VRF zk proof. + pub zk_proof: TicketZkProof, // Ticket opaque utility data. // TODO-SASS-P3: Interpretation of this data is up to the application? Investigate // Suggested by Jeff: @@ -142,27 +152,13 @@ pub struct Ticket { //pub data: Vec, } -use core::cmp::Ordering; - -impl PartialOrd for Ticket { - fn partial_cmp(&self, other: &Self) -> Option { - self.output.partial_cmp(&other.output) - } -} - -impl Ord for Ticket { - fn cmp(&self, other: &Self) -> Ordering { - self.output.cmp(&other.output) - } -} - -/// Ticket auxiliary information. +/// Ticket private auxiliary information. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketAux { /// Attempt number. pub attempt: u32, - /// Ticket revelation proof. - pub proof: VRFProof, + /// Ticket proof used to claim a slot. + pub proof: TicketProof, } /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: @@ -184,7 +180,7 @@ pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: /// Returns true if the given VRF output is lower than the given threshold, false otherwise. pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { - U256::from(ticket.output.as_bytes()) < threshold + U256::from(ticket.as_bytes()) < threshold } /// An opaque type used to represent the key ownership proof at the runtime API boundary. @@ -214,9 +210,9 @@ sp_api::decl_runtime_apis! { pub trait SassafrasApi { /// Submit next epoch validator tickets via an unsigned extrinsic. 
/// This method returns `false` when creation of the extrinsics fails. - fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; + fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; - /// Get expected ticket for the given slot. + /// Get expected ticket value for the given slot. fn slot_ticket(slot: Slot) -> Option; /// Current epoch information. diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index df8a013ad3a20..6c856077cdb7e 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -970,7 +970,7 @@ cfg_if! { impl sp_consensus_sassafras::SassafrasApi for Runtime { fn submit_tickets_unsigned_extrinsic( - tickets: Vec + tickets: Vec ) -> bool { >::submit_tickets_unsigned_extrinsic(tickets) } @@ -1292,7 +1292,7 @@ cfg_if! { impl sp_consensus_sassafras::SassafrasApi for Runtime { fn submit_tickets_unsigned_extrinsic( - tickets: Vec + tickets: Vec ) -> bool { >::submit_tickets_unsigned_extrinsic(tickets) } From a358c9f88f1a658d63964cea7f8cd7baca7b9f5b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 24 Jan 2023 14:33:48 +0100 Subject: [PATCH 17/62] Fix after master merge --- Cargo.lock | 166 ++++++++++++++++++ bin/node-sassafras/node/Cargo.toml | 10 +- client/consensus/sassafras/Cargo.toml | 5 +- .../consensus/sassafras/src/block_import.rs | 20 +-- client/consensus/sassafras/src/tests.rs | 94 +++++----- .../consensus/sassafras/src/verification.rs | 1 + frame/sassafras/src/lib.rs | 3 + .../consensus/sassafras/src/inherents.rs | 2 +- 8 files changed, 230 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08948ffbd970b..842cb1e4a1935 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2871,6 +2871,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hkdf" version = "0.12.3" @@ -4891,6 +4897,83 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.1-dev" +dependencies = [ + "clap 4.0.32", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-sassafras", + "sc-executor", + "sc-finality-grandpa", + "sc-keystore", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.1-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -6285,6 +6368,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.1-dev" +dependencies = [ + "frame-benchmarking", + 
"frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -8246,6 +8350,45 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.1-dev" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "schnorrkel", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -9803,6 +9946,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.3.1-dev" +dependencies = [ + "async-trait", + "merlin", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -10696,6 +10860,7 @@ dependencies = [ "log", "memory-db", "pallet-babe", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -10710,6 +10875,7 @@ dependencies = [ "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-finality-grandpa", diff --git 
a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 48453f20c1446..72845823eaeb9 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -18,10 +18,10 @@ name = "node-sassafras" [dependencies] clap = { version = "4.0.9", features = ["derive"] } -sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } -sc-executor = { version = "0.10.0-dev", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.10.0-dev", path = "../../../client/service", features = ["wasmtime"] } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-service = { version = "0.10.0-dev", path = "../../../client/service" } sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } @@ -36,12 +36,12 @@ sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.15.1", features = ["server"] } +jsonrpsee = { version = "0.16.2", features = ["server"] } sc-rpc = { 
version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index d6dec3a830b46..f280841fd266f 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -45,6 +45,7 @@ sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } \ No newline at end of file +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +tokio = "1.22.0" \ No newline at end of file diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index bf6e37a825b76..bf00ab3c6ecc8 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -329,7 +329,7 @@ where // Early exit if block already in chain, otherwise the check for epoch changes // will error when trying to re-import - match self.client.status(BlockId::Hash(hash)) { + match self.client.status(hash) { Ok(BlockStatus::InChain) => { block.remove_intermediate::>(INTERMEDIATE_KEY)?; block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); @@ -347,17 +347,6 @@ where .remove_intermediate::>(INTERMEDIATE_KEY)? 
.epoch_descriptor; - // Early exit if block already in chain, otherwise the check for - // epoch changes will error when trying to re-import an epoch change. - match self.client.status(BlockId::Hash(hash)) { - Ok(sp_blockchain::BlockStatus::InChain) => { - block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); - return self.inner.import_block(block, new_cache).await.map_err(Into::into) - }, - Ok(sp_blockchain::BlockStatus::Unknown) => {}, - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - } - let pre_digest = find_pre_digest::(&block.header) .expect("valid headers contain a pre-digest; header has been already verified; qed"); let slot = pre_digest.slot; @@ -365,7 +354,7 @@ where let parent_hash = *block.header.parent_hash(); let parent_header = self .client - .header(BlockId::Hash(parent_hash)) + .header(parent_hash) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? .ok_or_else(|| { ConsensusError::ChainLookup( @@ -479,13 +468,10 @@ where C: HeaderBackend + HeaderMetadata, { let info = client.info(); - if info.block_gap.is_none() { - epoch_changes.clear_gap(); - } let finalized_slot = { let finalized_header = client - .header(BlockId::Hash(info.finalized_hash)) + .header(info.finalized_hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.expect("finalized headers must exist in db; qed"); diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index e3f9848771dc9..281025b5edcd9 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -302,18 +302,17 @@ impl TestContext { } // Propose a block - fn propose_block(&mut self, parent_id: BlockId, slot: Option) -> TestBlockImportParams { - let parent = self.client.header(&parent_id).unwrap().unwrap(); - let parent_hash = parent.hash(); - let parent_number = *parent.number(); + fn propose_block(&mut self, parent_hash: Hash, slot: Option) -> TestBlockImportParams { + let parent_header = self.client.header(parent_hash).unwrap().unwrap(); + let parent_number = *parent_header.number(); let authority = Sr25519Keyring::Alice; let keystore = create_keystore(authority); - let proposer = block_on(self.init(&parent)).unwrap(); + let proposer = block_on(self.init(&parent_header)).unwrap(); let slot = slot.unwrap_or_else(|| { - let parent_pre_digest = find_pre_digest::(&parent).unwrap(); + let parent_pre_digest = find_pre_digest::(&parent_header).unwrap(); parent_pre_digest.slot + 1 }); @@ -366,23 +365,21 @@ impl TestContext { // Propose and import a new block on top of the given parent. // This skips verification. - fn propose_and_import_block(&mut self, parent_id: BlockId, slot: Option) -> Hash { - let params = self.propose_block(parent_id, slot); + fn propose_and_import_block(&mut self, parent_hash: Hash, slot: Option) -> Hash { + let params = self.propose_block(parent_hash, slot); self.import_block(params) } // Propose and import n valid blocks that are built on top of the given parent. // The proposer takes care of producing epoch change digests according to the epoch // duration (which is set by the test runtime). 
- fn propose_and_import_blocks(&mut self, mut parent_id: BlockId, n: usize) -> Vec { + fn propose_and_import_blocks(&mut self, mut parent_hash: Hash, n: usize) -> Vec { let mut hashes = Vec::with_capacity(n); - for _ in 0..n { - let hash = self.propose_and_import_block(parent_id, None); + let hash = self.propose_and_import_block(parent_hash, None); hashes.push(hash); - parent_id = BlockId::Hash(hash); + parent_hash = hash; } - hashes } } @@ -509,7 +506,7 @@ fn claim_primary_slots_works() { fn import_rejects_block_without_pre_digest() { let mut env = TestContext::new(); - let mut import_params = env.propose_block(BlockId::Number(0), Some(999.into())); + let mut import_params = env.propose_block(env.client.info().genesis_hash, Some(999.into())); // Remove logs from the header import_params.header.digest_mut().logs.clear(); @@ -521,9 +518,9 @@ fn import_rejects_block_without_pre_digest() { fn import_rejects_block_with_unexpected_epoch_changes() { let mut env = TestContext::new(); - env.propose_and_import_block(BlockId::Number(0), None); + let hash1 = env.propose_and_import_block(env.client.info().genesis_hash, None); - let mut import_params = env.propose_block(BlockId::Number(1), None); + let mut import_params = env.propose_block(hash1, None); // Insert an epoch change announcement when it is not required. 
let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: env.link.genesis_config.authorities.clone(), @@ -543,10 +540,10 @@ fn import_rejects_block_with_unexpected_epoch_changes() { fn import_rejects_block_with_missing_epoch_changes() { let mut env = TestContext::new(); - let blocks = env.propose_and_import_blocks(BlockId::Number(0), EPOCH_DURATION as usize); + let blocks = + env.propose_and_import_blocks(env.client.info().genesis_hash, EPOCH_DURATION as usize); - let mut import_params = - env.propose_block(BlockId::Hash(blocks[EPOCH_DURATION as usize - 1]), None); + let mut import_params = env.propose_block(blocks[EPOCH_DURATION as usize - 1], None); let digest = import_params.header.digest_mut(); // Remove the epoch change announcement. @@ -560,7 +557,7 @@ fn import_rejects_block_with_missing_epoch_changes() { fn importing_block_one_sets_genesis_epoch() { let mut env = TestContext::new(); - let block_hash = env.propose_and_import_block(BlockId::Number(0), Some(999.into())); + let block_hash = env.propose_and_import_block(env.client.info().genesis_hash, Some(999.into())); let epoch_for_second_block = env.epoch_data(&block_hash, 1, 1000.into()); let genesis_epoch = Epoch::genesis(&env.link.genesis_config, 999.into()); @@ -580,11 +577,10 @@ fn allows_to_skip_epochs() { // configuration created for epoch 2. 
let mut env = TestContext::new(); - let blocks = env.propose_and_import_blocks(BlockId::Number(0), 7); + let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7); // First block after the a skipped epoch (block #8 @ slot #19) - let block = - env.propose_and_import_block(BlockId::Hash(*blocks.last().unwrap()), Some(19.into())); + let block = env.propose_and_import_block(*blocks.last().unwrap(), Some(19.into())); let epoch_changes = env.link.epoch_changes.shared_data(); let epochs: Vec<_> = epoch_changes.tree().iter().collect(); @@ -648,11 +644,11 @@ fn allows_to_skip_epochs() { fn finalization_prunes_epoch_changes_and_removes_weights() { let mut env = TestContext::new(); - let canon = env.propose_and_import_blocks(BlockId::Number(0), 21); + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21); - let _fork1 = env.propose_and_import_blocks(BlockId::Hash(canon[0]), 10); - let _fork2 = env.propose_and_import_blocks(BlockId::Hash(canon[7]), 10); - let _fork3 = env.propose_and_import_blocks(BlockId::Hash(canon[11]), 8); + let _fork1 = env.propose_and_import_blocks(canon[0], 10); + let _fork2 = env.propose_and_import_blocks(canon[7], 10); + let _fork3 = env.propose_and_import_blocks(canon[11], 8); let epoch_changes = env.link.epoch_changes.clone(); @@ -679,7 +675,7 @@ fn finalization_prunes_epoch_changes_and_removes_weights() { // Finalize block #10 so that on next epoch change the tree is pruned env.client.finalize_block(canon[13], None, true).unwrap(); - let canon_tail = env.propose_and_import_blocks(BlockId::Hash(*canon.last().unwrap()), 4); + let canon_tail = env.propose_and_import_blocks(*canon.last().unwrap(), 4); // Post-finalize scenario. 
// @@ -698,10 +694,10 @@ fn finalization_prunes_epoch_changes_and_removes_weights() { fn revert_prunes_epoch_changes_and_removes_weights() { let mut env = TestContext::new(); - let canon = env.propose_and_import_blocks(BlockId::Number(0), 21); - let fork1 = env.propose_and_import_blocks(BlockId::Hash(canon[0]), 10); - let fork2 = env.propose_and_import_blocks(BlockId::Hash(canon[7]), 10); - let fork3 = env.propose_and_import_blocks(BlockId::Hash(canon[11]), 8); + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21); + let fork1 = env.propose_and_import_blocks(canon[0], 10); + let fork2 = env.propose_and_import_blocks(canon[7], 10); + let fork3 = env.propose_and_import_blocks(canon[11], 8); let epoch_changes = env.link.epoch_changes.clone(); @@ -773,7 +769,7 @@ fn revert_prunes_epoch_changes_and_removes_weights() { fn revert_not_allowed_for_finalized() { let mut env = TestContext::new(); - let canon = env.propose_and_import_blocks(BlockId::Number(0), 3); + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 3); // Finalize best block env.client.finalize_block(canon[2], None, false).unwrap(); @@ -793,9 +789,9 @@ fn revert_not_allowed_for_finalized() { fn verify_block_claimed_via_secondary_method() { let mut env = TestContext::new(); - let blocks = env.propose_and_import_blocks(BlockId::Number(0), 7); + let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7); - let in_params = env.propose_block(BlockId::Hash(blocks[6]), Some(9.into())); + let in_params = env.propose_block(blocks[6], Some(9.into())); let _out_params = env.verify_block(in_params); } @@ -882,8 +878,8 @@ impl TestNetFactory for SassafrasTestNet { } // Multiple nodes authoring and validating blocks -#[test] -fn sassafras_network_progress() { +#[tokio::test] +async fn sassafras_network_progress() { let net = SassafrasTestNet::new(3); let net = Arc::new(Mutex::new(net)); @@ -931,13 +927,18 @@ fn sassafras_network_progress() { .for_each(|_| 
future::ready(())); import_notifications.push(import_futures); - let slot_duration = data.link.genesis_config.slot_duration(); - let create_inherent_data_providers = Box::new(move |_, _| async move { - let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - slot_duration, - ); - Ok((slot,)) + //let slot_duration = data.link.genesis_config.slot_duration(); + let client_clone = client.clone(); + let create_inherent_data_providers = Box::new(move |parent, _| { + // Get the slot of the parent header and just increase this slot. + // + // Below we will be running everything in one big future. If we would use + // time based slot, it can happen that one babe instance imports a block from + // another babe instance and then tries to build a block in the same slot making + // this test fail. + let parent_header = client_clone.header(parent).ok().flatten().unwrap(); + let slot = Slot::from(find_pre_digest::(&parent_header).unwrap().slot + 1); + async move { Ok((InherentDataProvider::new(slot),)) } }); let sassafras_params = SassafrasWorkerParams { client: client.clone(), @@ -955,7 +956,7 @@ fn sassafras_network_progress() { sassafras_workers.push(sassafras_worker); } - block_on(future::select( + future::select( futures::future::poll_fn(move |cx| { let mut net = net.lock(); net.poll(cx); @@ -967,5 +968,6 @@ Poll::<()>::Pending }), future::select(future::join_all(import_notifications), future::join_all(sassafras_workers)), - )); + ) + .await; } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 70c235ff7825e..50cc11ffce94e 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -405,6 +405,7 @@ where // TODO-SASS-P3 :???
DOC let mut inherent_data = create_inherent_data_providers .create_inherent_data() + .await .map_err(Error::::CreateInherents)?; inherent_data.sassafras_replace_inherent_data(pre_digest.slot); self.check_inherents( diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 54d5dec64fa33..f37df5eca9dfe 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -326,6 +326,7 @@ pub mod pallet { /// Submit next epoch tickets. /// /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? + #[pallet::call_index(0)] #[pallet::weight(10_000)] pub fn submit_tickets( origin: OriginFor, @@ -357,6 +358,7 @@ pub mod pallet { /// not been enacted yet. /// /// TODO-SASS-P4: proper weight + #[pallet::call_index(1)] #[pallet::weight(10_000)] pub fn plan_config_change( origin: OriginFor, @@ -382,6 +384,7 @@ pub mod pallet { /// will be defined as the equivocation reporter. /// /// TODO-SASS-P4: proper weight + #[pallet::call_index(2)] #[pallet::weight(10_000)] pub fn report_equivocation_unsigned( origin: OriginFor, diff --git a/primitives/consensus/sassafras/src/inherents.rs b/primitives/consensus/sassafras/src/inherents.rs index 6af6e4b4732c6..d6254a80a16e8 100644 --- a/primitives/consensus/sassafras/src/inherents.rs +++ b/primitives/consensus/sassafras/src/inherents.rs @@ -87,7 +87,7 @@ impl sp_std::ops::Deref for InherentDataProvider { #[cfg(feature = "std")] #[async_trait::async_trait] impl sp_inherents::InherentDataProvider for InherentDataProvider { - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) } From 60410fbd9800ebcc716334c0e124f260dab6a905 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 24 Jan 2023 14:50:35 +0100 Subject: [PATCH 18/62] Pick warp sync fix from babe --- .../consensus/sassafras/src/block_import.rs | 28 
++++++++++ 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index bf00ab3c6ecc8..f10b8bb033817 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -326,17 +326,25 @@ where ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); + let info = self.client.info(); + + let block_status = self + .client + .status(hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - // Early exit if block already in chain, otherwise the check for epoch changes - // will error when trying to re-import - match self.client.status(hash) { - Ok(BlockStatus::InChain) => { - block.remove_intermediate::>(INTERMEDIATE_KEY)?; - block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); - return self.inner.import_block(block, new_cache).await.map_err(Into::into) - }, - Ok(BlockStatus::Unknown) => {}, - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + // Skip protocol-specific logic if block already on-chain or importing blocks + // during initial sync, otherwise the check for epoch changes will error + // because of trying to re-import an epoch change entry or because of missing epoch + // data in the tree, respectively. + if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || + block_status == BlockStatus::InChain + { + // When re-importing existing block strip away intermediates. + // In case of initial sync intermediates should not be present...
+ let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) } if block.with_state() { From 95ba593adb8a229460c1aeade2d3048eec933c0b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 24 Jan 2023 17:41:57 +0100 Subject: [PATCH 19/62] Fix sassafras node binary after master merge --- bin/node-sassafras/runtime/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 95b38ffa718c2..4a995d21c1a80 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -30,8 +30,8 @@ use frame_support::{ construct_runtime, parameter_types, traits::{ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem}, weights::{ - constants::{RocksDbWeight, WEIGHT_PER_SECOND}, - IdentityFee, + constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, + IdentityFee, Weight, }, }; @@ -159,9 +159,9 @@ parameter_types! { /// We allow for 2 seconds of compute with a 3 second average block time. pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - (2_u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), - NORMAL_DISPATCH_RATIO, - ); + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + NORMAL_DISPATCH_RATIO, + ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; @@ -380,7 +380,7 @@ impl_runtime_apis! 
{ impl sp_consensus_sassafras::SassafrasApi for Runtime { fn submit_tickets_unsigned_extrinsic( - tickets: Vec + tickets: Vec ) -> bool { Sassafras::submit_tickets_unsigned_extrinsic(tickets) } From 7f224ff8a9bb7a1594136aef7a311e6dbec60510 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 22 Feb 2023 10:50:07 +0100 Subject: [PATCH 20/62] Fix after master merge --- bin/node-sassafras/node/src/service.rs | 4 ++-- bin/node-sassafras/runtime/src/lib.rs | 23 ++++++++++++++++-- client/consensus/sassafras/src/authorship.rs | 9 ++++--- .../consensus/sassafras/src/block_import.rs | 5 ++-- client/consensus/sassafras/src/lib.rs | 4 ++-- .../consensus/sassafras/src/verification.rs | 24 +++++++++---------- 6 files changed, 43 insertions(+), 26 deletions(-) diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 3e6a6d96e2eb5..4c0edc43c77fa 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -5,7 +5,7 @@ use sc_client_api::BlockBackend; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use std::{sync::Arc, time::Duration}; @@ -211,7 +211,7 @@ pub fn new_full(mut config: Configuration) -> Result spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, - warp_sync: Some(warp_sync), + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), })?; if config.offchain_worker.enabled { diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 4a995d21c1a80..c999f3ceaf78c 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -199,12 +199,12 @@ impl frame_system::Config for Runtime 
{ impl pallet_sassafras::Config for Runtime { type SlotDuration = ConstU64; type EpochDuration = ConstU64; + type MaxAuthorities = ConstU32; + type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; #[cfg(feature = "use-session-pallet")] type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; #[cfg(not(feature = "use-session-pallet"))] type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; - type MaxAuthorities = ConstU32; - type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; } impl pallet_grandpa::Config for Runtime { @@ -219,6 +219,7 @@ impl pallet_grandpa::Config for Runtime { type HandleEquivocation = (); type WeightInfo = (); type MaxAuthorities = ConstU32; + type MaxSetIdSessionEntries = ConstU64<0>; } impl pallet_timestamp::Config for Runtime { @@ -466,12 +467,21 @@ impl_runtime_apis! { ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } + fn query_fee_details( uxt: ::Extrinsic, len: u32, ) -> pallet_transaction_payment::FeeDetails { TransactionPayment::query_fee_details(uxt, len) } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { @@ -481,12 +491,21 @@ impl_runtime_apis! 
{ ) -> pallet_transaction_payment::RuntimeDispatchInfo { TransactionPayment::query_call_info(call, len) } + fn query_call_fee_details( call: RuntimeCall, len: u32, ) -> pallet_transaction_payment::FeeDetails { TransactionPayment::query_call_fee_details(call, len) } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 282866e607e5d..175efdf96aa8e 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -229,8 +229,7 @@ where debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); // Get the next slot ticket from the runtime. - let block_id = BlockId::Hash(parent_header.hash()); - let ticket = self.client.runtime_api().slot_ticket(&block_id, slot).ok()?; + let ticket = self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; // TODO-SASS-P2: remove me debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); @@ -419,15 +418,15 @@ async fn start_tickets_worker( } // Get the best block on which we will publish the tickets. 
- let best_id = match select_chain.best_chain().await { - Ok(header) => BlockId::Hash(header.hash()), + let best_hash = match select_chain.best_chain().await { + Ok(header) => header.hash(), Err(err) => { error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); continue }, }; - let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(&best_id, tickets) { + let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(best_hash, tickets) { Err(err) => Some(err.to_string()), Ok(false) => Some("Unknown reason".to_string()), _ => None, diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index f10b8bb033817..004bd119be9d1 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -282,11 +282,10 @@ where }; // Read epoch info from the imported state - let block_id = BlockId::Hash(hash); - let curr_epoch = self.client.runtime_api().current_epoch(&block_id).map_err(|e| { + let curr_epoch = self.client.runtime_api().current_epoch(hash).map_err(|e| { ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) })?; - let next_epoch = self.client.runtime_api().next_epoch(&block_id).map_err(|e| { + let next_epoch = self.client.runtime_api().next_epoch(hash).map_err(|e| { ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) })?; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 88a2aa3f80ced..07e5c480e7867 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -70,7 +70,7 @@ use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, + generic::OpaqueDigestItemId, traits::{Block as BlockT, Header, 
NumberFor, One, Zero}, DigestItem, }; @@ -264,7 +264,7 @@ where info.genesis_hash }); - let epoch = client.runtime_api().current_epoch(&BlockId::Hash(hash))?; + let epoch = client.runtime_api().current_epoch(hash)?; Ok(epoch.config) } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 50cc11ffce94e..8dabbf3ed937a 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -172,7 +172,7 @@ where async fn check_inherents( &self, block: Block, - block_id: BlockId, + at_hash: Block::Hash, inherent_data: InherentData, create_inherent_data_providers: CIDP::InherentDataProviders, execution_context: ExecutionContext, @@ -180,7 +180,7 @@ where let inherent_res = self .client .runtime_api() - .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .check_inherents_with_context(at_hash, execution_context, block, inherent_data) .map_err(Error::RuntimeApi)?; if !inherent_res.ok() { @@ -232,11 +232,11 @@ where ); // Get the best block on which we will build and send the equivocation report. - let best_id = self + let best_hash = self .select_chain .best_chain() .await - .map(|h| BlockId::Hash(h.hash())) + .map(|h| h.hash()) .map_err(|e| Error::Client(e.into()))?; // Generate a key ownership proof. We start by trying to generate the key owernship proof @@ -246,17 +246,17 @@ where // happens on the first block of the session, in which case its parent would be on the // previous session. If generation on the parent header fails we try with best block as // well. 
- let generate_key_owner_proof = |block_id: &BlockId| { + let generate_key_owner_proof = |at_hash: Block::Hash| { self.client .runtime_api() - .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) + .generate_key_ownership_proof(at_hash, slot, equivocation_proof.offender.clone()) .map_err(Error::RuntimeApi) }; - let parent_id = BlockId::Hash(*header.parent_hash()); - let key_owner_proof = match generate_key_owner_proof(&parent_id)? { + let parent_hash = *header.parent_hash(); + let key_owner_proof = match generate_key_owner_proof(parent_hash)? { Some(proof) => proof, - None => match generate_key_owner_proof(&best_id)? { + None => match generate_key_owner_proof(best_hash)? { Some(proof) => proof, None => { debug!(target: "babe", "Equivocation offender is not part of the authority set."); @@ -269,7 +269,7 @@ where self.client .runtime_api() .submit_report_equivocation_unsigned_extrinsic( - &best_id, + best_hash, equivocation_proof, key_owner_proof, ) @@ -360,7 +360,7 @@ where let ticket = self .client .runtime_api() - .slot_ticket(&BlockId::Hash(parent_hash), pre_digest.slot) + .slot_ticket(parent_hash, pre_digest.slot) .ok() .unwrap_or_else(|| None); @@ -410,7 +410,7 @@ where inherent_data.sassafras_replace_inherent_data(pre_digest.slot); self.check_inherents( new_block.clone(), - BlockId::Hash(parent_hash), + parent_hash, inherent_data, create_inherent_data_providers, block.origin.into(), From be6974b95eac97b717c60e0d2c45828164b7829c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 27 Mar 2023 11:39:55 +0200 Subject: [PATCH 21/62] Fix after master merge --- Cargo.lock | 166 ++++++++++++++++++ bin/node-sassafras/node/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 77 ++++---- .../consensus/sassafras/src/block_import.rs | 10 +- client/consensus/sassafras/src/lib.rs | 9 +- client/consensus/sassafras/src/tests.rs | 126 ++++++------- .../consensus/sassafras/src/verification.rs | 9 +- frame/sassafras/src/lib.rs | 1 - 
primitives/consensus/sassafras/src/lib.rs | 1 + test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/src/lib.rs | 12 ++ 11 files changed, 291 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33bca4005483c..9b1b360c714cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3033,6 +3033,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + [[package]] name = "hkdf" version = "0.12.3" @@ -5149,6 +5155,83 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.1-dev" +dependencies = [ + "clap 4.1.8", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-executor", + "sc-keystore", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.1-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + 
"pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -6642,6 +6725,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.1-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -8784,6 +8888,45 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.1-dev" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "schnorrkel", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -10329,6 +10472,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.3.1-dev" +dependencies = [ + "async-trait", + "merlin", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + 
"sp-application-crypto", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -11204,6 +11368,7 @@ dependencies = [ "memory-db", "pallet-babe", "pallet-beefy-mmr", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -11219,6 +11384,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-inherents", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 72845823eaeb9..0a5eeb17e9ace 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -30,8 +30,8 @@ sc-consensus-sassafras = { version = "0.3.1-dev", path = "../../../client/consen sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } +sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 175efdf96aa8e..13a5563c608e1 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ 
b/client/consensus/sassafras/src/authorship.rs @@ -44,7 +44,7 @@ pub(crate) fn claim_slot( slot: Slot, epoch: &Epoch, ticket: Option, - keystore: &SyncCryptoStorePtr, + keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { let config = &epoch.config; @@ -69,14 +69,10 @@ pub(crate) fn claim_slot( let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_idx); - let signature = SyncCryptoStore::sr25519_vrf_sign( - &**keystore, - AuthorityId::ID, - authority_id.as_ref(), - transcript_data, - ) - .ok() - .flatten()?; + let signature = keystore + .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), transcript_data) + .ok() + .flatten()?; let pre_digest = PreDigest { authority_idx, @@ -92,7 +88,7 @@ pub(crate) fn claim_slot( /// Generate the tickets for the given epoch. /// Tickets additional information will be stored within the `Epoch` structure. /// The additional information will be used later during session to claim slots. 
-fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> Vec { +fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; let redundancy_factor = config.threshold_params.redundancy_factor; @@ -109,8 +105,7 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> V let authorities = config.authorities.iter().enumerate().map(|(index, a)| (index, &a.0)); for (authority_idx, authority_id) in authorities { - if !SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) - { + if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { continue } @@ -120,13 +115,9 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &SyncCryptoStorePtr) -> V // TODO-SASS-P4: can be a good idea to replace `vrf_sign` with `vrf_sign_after_check`, // But we need to modify the CryptoStore interface first. - let signature = SyncCryptoStore::sr25519_vrf_sign( - &**keystore, - AuthorityId::ID, - authority_id.as_ref(), - transcript_data.clone(), - ) - .ok()??; + let signature = keystore + .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), transcript_data.clone()) + .ok()??; let ticket = VRFOutput(signature.output); if !sp_consensus_sassafras::check_threshold(&ticket, threshold) { @@ -163,7 +154,7 @@ struct SlotWorker { sync_oracle: SO, justification_sync_link: L, force_authoring: bool, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, genesis_config: SassafrasConfiguration, @@ -187,7 +178,7 @@ where type SyncOracle = SO; type JustificationSyncLink = L; type CreateProposer = - Pin> + Send + 'static>>; + Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; type AuxData = ViableEpochDescriptor, Epoch>; @@ -210,7 +201,7 @@ where slot, ) .map_err(|e| 
ConsensusError::ChainLookup(e.to_string()))? - .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + .ok_or(ConsensusError::InvalidAuthoritiesSet) } fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { @@ -282,28 +273,28 @@ where epoch_descriptor: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error, + ConsensusError, > { - // Sign the pre-sealed hash of the block and then add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*self.keystore, - ::ID, - &public_type_pair, - header_hash.as_ref(), - ) - .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? - .ok_or_else(|| { - sp_consensus::Error::CannotSign( - public.clone(), - "Could not find key in keystore.".into(), + let signature = self + .keystore + .sign_with( + ::ID, + ::CRYPTO_ID, + public.as_ref(), + header_hash.as_ref(), ) - })?; + .map_err(|e| ConsensusError::CannotSign(format!("{}. Key {:?}", e, public)))? + .ok_or_else(|| { + ConsensusError::CannotSign(format!( + "Could not find key in keystore. Key {:?}", + public + )) + })?; let signature: AuthoritySignature = signature .clone() .try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + .map_err(|_| ConsensusError::InvalidSignature(signature, public.to_raw_vec()))?; + let digest_item = ::sassafras_seal(signature); let mut block = BlockImportParams::new(BlockOrigin::Own, header); @@ -338,7 +329,7 @@ where Box::pin( self.env .init(block) - .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e))), ) } @@ -373,7 +364,7 @@ where /// and are volatile. 
async fn start_tickets_worker( client: Arc, - keystore: SyncCryptoStorePtr, + keystore: KeystorePtr, epoch_changes: SharedEpochChanges, select_chain: SC, ) where @@ -487,7 +478,7 @@ pub struct SassafrasWorkerParams { /// The client to use pub client: Arc, /// The keystore that manages the keys of the node. - pub keystore: SyncCryptoStorePtr, + pub keystore: KeystorePtr, /// The chain selection strategy pub select_chain: SC, /// The environment we are producing blocks for. @@ -522,7 +513,7 @@ pub fn start_sassafras( force_authoring, sassafras_link, }: SassafrasWorkerParams, -) -> Result, sp_consensus::Error> +) -> Result, ConsensusError> where B: BlockT, C: ProvideRuntimeApi diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 004bd119be9d1..4b54439be4f6a 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -251,7 +251,6 @@ where async fn import_state( &mut self, mut block: BlockImportParams>, - new_cache: HashMap>, ) -> Result { let hash = block.post_hash(); let parent_hash = *block.header.parent_hash(); @@ -271,7 +270,7 @@ where }); // First make the client import the state - let aux = match self.inner.import_block(block, new_cache).await { + let aux = match self.inner.import_block(block).await { Ok(ImportResult::Imported(aux)) => aux, Ok(r) => return Err(ConsensusError::ClientImport(format!( @@ -321,7 +320,6 @@ where async fn import_block( &mut self, mut block: BlockImportParams, - new_cache: HashMap>, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); @@ -343,11 +341,11 @@ where // In case of initial sync intermediates should not be present... 
let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); - return self.inner.import_block(block, new_cache).await.map_err(Into::into) + return self.inner.import_block(block).await.map_err(Into::into) } if block.with_state() { - return self.import_state(block, new_cache).await + return self.import_state(block).await } let viable_epoch_desc = block @@ -443,7 +441,7 @@ where let is_new_best = self.is_new_best(total_weight, number, parent_hash)?; block.fork_choice = Some(ForkChoiceStrategy::Custom(is_new_best)); - let import_result = self.inner.import_block(block, new_cache).await; + let import_result = self.inner.import_block(block).await; // Revert to the original epoch changes in case there's an error // importing the block diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 07e5c480e7867..3b6a37d38e18a 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -25,7 +25,7 @@ #![forbid(unsafe_code, missing_docs)] use std::{ - collections::{BTreeMap, HashMap}, + collections::BTreeMap, future::Future, sync::Arc, task::{Context, Poll}, @@ -58,17 +58,16 @@ use sc_consensus_epochs::{ use sc_consensus_slots::{CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::AppKey; +use sp_application_crypto::AppCrypto; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus::{ - BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain, - SyncOracle, + BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; use sp_consensus_slots::Slot; use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; use 
sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::KeystorePtr; use sp_runtime::{ generic::OpaqueDigestItemId, traits::{Block as BlockT, Header, NumberFor, One, Zero}, diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 281025b5edcd9..0beb1391706c6 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -36,15 +36,13 @@ use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_sassafras::{inherents::InherentDataProvider, vrf::make_slot_transcript_data}; use sp_keyring::Sr25519Keyring; -use sp_keystore::testing::KeyStore as TestKeyStore; +use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{Digest, DigestItem}; use sp_timestamp::Timestamp; use substrate_test_runtime_client::{runtime::Block as TestBlock, Backend as TestBackend}; -// Monomorphization of generic structures for the test context. - -type BlockId = crate::BlockId; +// Monomorphization of generic structures for test context. type TestHeader = ::Header; @@ -65,7 +63,7 @@ type TestBlockImportParams = BlockImportParams; type TestViableEpochDescriptor = sc_consensus_epochs::ViableEpochDescriptor; -// Monomorphization of Sassafras structures for the test context. +// Monomorphization of Sassafras structures for test context. 
type SassafrasIntermediate = crate::SassafrasIntermediate; @@ -121,10 +119,8 @@ impl Proposer for TestProposer { _: Duration, _: Option, ) -> Self::Proposal { - let block_builder = self - .client - .new_block_at(&BlockId::Hash(self.parent_hash), inherent_digests, false) - .unwrap(); + let block_builder = + self.client.new_block_at(self.parent_hash, inherent_digests, false).unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, @@ -176,6 +172,7 @@ struct TestContext { link: SassafrasLink, block_import: SassafrasBlockImport, verifier: SassafrasVerifier, + keystore: KeystorePtr, } fn create_test_verifier( @@ -213,6 +210,26 @@ fn create_test_block_import( .expect("can initialize block-import") } +fn create_test_keystore(authority: Sr25519Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); + keystore.sr25519_generate_new(SASSAFRAS, Some(&authority.to_seed())).unwrap(); + keystore.into() +} + +fn create_test_config() -> SassafrasConfiguration { + SassafrasConfiguration { + slot_duration: SLOT_DURATION, + epoch_duration: EPOCH_DURATION, + authorities: vec![ + (Sr25519Keyring::Alice.public().into(), 1), + (Sr25519Keyring::Bob.public().into(), 1), + (Sr25519Keyring::Charlie.public().into(), 1), + ], + randomness: [0; 32], + threshold_params: SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, + } +} + impl TestContext { fn new() -> Self { let (client, backend) = TestClientBuilder::with_default_backend().build_with_backend(); @@ -225,20 +242,24 @@ impl TestContext { let (block_import, link) = create_test_block_import(client.clone(), config.clone()); + // Create a keystore with default testing key + let keystore = create_test_keystore(Sr25519Keyring::Alice); + let verifier = create_test_verifier(client.clone(), &link, config.clone()); - Self { client, backend, link, block_import, verifier } + Self { client, backend, link, block_import, verifier, keystore } } // This is a bit hacky solution to use 
`TestContext` as an `Environment` implementation - fn new_with_pre_built_data( + fn new_with_pre_built_components( client: Arc, backend: Arc, link: SassafrasLink, block_import: SassafrasBlockImport, + keystore: KeystorePtr, ) -> Self { let verifier = create_test_verifier(client.clone(), &link, link.genesis_config.clone()); - Self { client, backend, link, block_import, verifier } + Self { client, backend, link, block_import, verifier, keystore } } fn import_block(&mut self, mut params: TestBlockImportParams) -> Hash { @@ -253,7 +274,7 @@ impl TestContext { } } - match block_on(self.block_import.import_block(params, Default::default())).unwrap() { + match block_on(self.block_import.import_block(params)).unwrap() { ImportResult::Imported(_) => (), _ => panic!("expected block to be imported"), } @@ -263,7 +284,7 @@ impl TestContext { fn verify_block(&mut self, params: TestBlockImportParams) -> TestBlockImportParams { let tmp_params = params.clear_storage_changes_and_mutate(); - let (tmp_params, _) = block_on(self.verifier.verify(tmp_params)).unwrap(); + let tmp_params = block_on(self.verifier.verify(tmp_params)).unwrap(); tmp_params.clear_storage_changes_and_mutate() } @@ -306,8 +327,7 @@ impl TestContext { let parent_header = self.client.header(parent_hash).unwrap().unwrap(); let parent_number = *parent_header.number(); - let authority = Sr25519Keyring::Alice; - let keystore = create_keystore(authority); + let public = self.keystore.sr25519_public_keys(SASSAFRAS)[0]; let proposer = block_on(self.init(&parent_header)).unwrap(); @@ -319,14 +339,11 @@ impl TestContext { let epoch = self.epoch_data(&parent_hash, parent_number, slot); let transcript_data = make_slot_transcript_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); - let signature = SyncCryptoStore::sr25519_vrf_sign( - &*keystore, - SASSAFRAS, - &authority.public(), - transcript_data, - ) - .unwrap() - .unwrap(); + let signature = self + .keystore + .sr25519_vrf_sign(SASSAFRAS, &public, 
transcript_data) + .unwrap() + .unwrap(); let pre_digest = PreDigest { slot, @@ -345,13 +362,13 @@ impl TestContext { // Sign the pre-sealed hash of the block and then add it to a digest item. let hash = block.header.hash(); - let public_type_pair = authority.public().into(); - let signature = - SyncCryptoStore::sign_with(&*keystore, SASSAFRAS, &public_type_pair, hash.as_ref()) - .unwrap() - .unwrap() - .try_into() - .unwrap(); + let signature = self + .keystore + .sr25519_sign(SASSAFRAS, &public, hash.as_ref()) + .unwrap() + .unwrap() + .try_into() + .unwrap(); let seal = DigestItem::sassafras_seal(signature); block.header.digest_mut().push(seal); @@ -384,37 +401,19 @@ impl TestContext { } } -fn create_keystore(authority: Sr25519Keyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(TestKeyStore::new()); - SyncCryptoStore::sr25519_generate_new(&*keystore, SASSAFRAS, Some(&authority.to_seed())) - .expect("Creates authority key"); - keystore -} - +// Check that protocol config returned by the runtime interface is equal to the expected one #[test] fn tests_assumptions_sanity_check() { let env = TestContext::new(); let config = env.link.genesis_config; + let test_config = create_test_config(); - // Check that genesis configuration read from test runtime has the expected values - assert_eq!( - config.authorities, - vec![ - (Sr25519Keyring::Alice.public().into(), 1), - (Sr25519Keyring::Bob.public().into(), 1), - (Sr25519Keyring::Charlie.public().into(), 1), - ] - ); - assert_eq!(config.epoch_duration, EPOCH_DURATION); - assert_eq!(config.slot_duration, SLOT_DURATION); - assert_eq!(config.randomness, [0; 32]); - // TODO-SASS-P3: check threshold params + assert_eq!(config, test_config); } #[test] fn claim_secondary_slots_works() { - let env = TestContext::new(); - let mut config = env.link.genesis_config.clone(); + let mut config = create_test_config(); config.randomness = [2; 32]; let authorities = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, 
Sr25519Keyring::Charlie]; @@ -429,7 +428,7 @@ fn claim_secondary_slots_works() { let mut assignments = vec![usize::MAX; config.epoch_duration as usize]; for (auth_idx, auth_id) in authorities.iter().enumerate() { - let keystore = create_keystore(*auth_id); + let keystore = create_test_keystore(*auth_id); for slot in 0..config.epoch_duration { if let Some((claim, auth_id2)) = @@ -453,12 +452,11 @@ fn claim_secondary_slots_works() { #[test] fn claim_primary_slots_works() { - // Here the test is very deterministic. + // Here the test is deterministic. // If a node has in its epoch `tickets_aux` the information corresponding to the // ticket that is presented. Then the claim ticket should just return the // ticket auxiliary information. - let env = TestContext::new(); - let mut config = env.link.genesis_config.clone(); + let mut config = create_test_config(); config.randomness = [2; 32]; let mut epoch = Epoch { @@ -468,7 +466,7 @@ fn claim_primary_slots_works() { tickets_aux: Default::default(), }; - let keystore = create_keystore(Sr25519Keyring::Alice); + let keystore = create_test_keystore(Sr25519Keyring::Alice); // Success if we have ticket data and the key in our keystore @@ -800,6 +798,7 @@ fn verify_block_claimed_via_secondary_method() { // More complex tests involving communication between multiple nodes. // // These tests are performed via a specially crafted test network. +// Closer to integration test than unit tests... 
//================================================================================================= impl Environment for TestContext { @@ -872,6 +871,10 @@ impl TestNetFactory for SassafrasTestNet { &self.peers } + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } @@ -894,16 +897,15 @@ async fn sassafras_network_progress() { let client = peer.client().as_client(); let backend = peer.client().as_backend(); let select_chain = peer.select_chain().expect("Full client has select_chain"); - - let keystore = create_keystore(*auth_id); - + let keystore = create_test_keystore(*auth_id); let data = peer.data.as_ref().expect("sassafras link set up during initialization"); - let env = TestContext::new_with_pre_built_data( + let env = TestContext::new_with_pre_built_components( client.clone(), backend.clone(), data.link.clone(), data.block_import.clone(), + keystore.clone(), ); // Run the imported block number is less than five and we don't receive a block produced diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 8dabbf3ed937a..4ede6b7bb7d96 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -281,9 +281,6 @@ where } } -type BlockVerificationResult = - Result<(BlockImportParams, Option)>>), String>; - #[async_trait::async_trait] impl Verifier for SassafrasVerifier @@ -303,7 +300,7 @@ where async fn verify( &mut self, mut block: BlockImportParams, - ) -> BlockVerificationResult { + ) -> Result, String> { trace!( target: "sassafras", "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", @@ -321,7 +318,7 @@ where // Just insert a tag to notify that this is indeed a Sassafras block to the // `BlockImport` implementation. 
block.insert_intermediate(INTERMEDIATE_KEY, ()); - return Ok((block, Default::default())) + return Ok(block) } let hash = block.header.hash(); @@ -438,7 +435,7 @@ where SassafrasIntermediate:: { epoch_descriptor }, ); - Ok((block, Default::default())) + Ok(block) }, CheckedHeader::Deferred(a, b) => { debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index f37df5eca9dfe..36bce7d08957d 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -99,7 +99,6 @@ pub mod pallet { /// The Sassafras pallet. #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(_); /// Configuration parameters. diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 4a09530eb60f3..b3079e01197ce 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -115,6 +115,7 @@ pub struct Epoch { } /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. +// TODO-SASS-P3: rename to something better... like LotteryConfig #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct SassafrasEpochConfiguration { diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index c3ac76b099b36..cb5161a1d43c5 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,7 +18,7 @@ sp-application-crypto = { version = "7.0.0", default-features = false, path = ". 
sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } -sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 5ea7945ace7e7..66c6647b6c21f 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -978,22 +978,34 @@ cfg_if! 
{ } fn current_epoch() -> sp_consensus_sassafras::Epoch { + // Defaults let authorities = system::authorities().into_iter().map(|x| { let authority: sr25519::Public = x.into(); (SassafrasId::from(authority), 1) }).collect(); + let params = sp_consensus_sassafras::SassafrasEpochConfiguration { + redundancy_factor: 1, + attempts_number: 32, + }; let mut epoch = >::current_epoch(); epoch.config.authorities = authorities; + epoch.config.threshold_params = params; epoch } fn next_epoch() -> sp_consensus_sassafras::Epoch { + // Defaults let authorities = system::authorities().into_iter().map(|x| { let authority: sr25519::Public = x.into(); (SassafrasId::from(authority), 1) }).collect(); + let params = sp_consensus_sassafras::SassafrasEpochConfiguration { + redundancy_factor: 1, + attempts_number: 32, + }; let mut epoch = >::next_epoch(); epoch.config.authorities = authorities; + epoch.config.threshold_params = params; epoch } From 356f3d09bb3acf7c395eb7db83cac82f52834553 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 27 Mar 2023 12:28:50 +0200 Subject: [PATCH 22/62] Small refactory --- bin/node-sassafras/node/src/chain_spec.rs | 2 +- bin/node-sassafras/node/src/command.rs | 2 +- bin/node-sassafras/node/src/service.rs | 60 ++++++++--------------- bin/node-sassafras/runtime/src/lib.rs | 24 +++++---- client/consensus/babe/src/tests.rs | 1 - client/consensus/sassafras/src/lib.rs | 5 +- client/consensus/sassafras/src/tests.rs | 6 +-- 7 files changed, 44 insertions(+), 56 deletions(-) diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 965fc197277c8..6ba72c5397715 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -5,9 +5,9 @@ use node_sassafras_runtime::{ #[cfg(feature = "use-session-pallet")] use node_sassafras_runtime::{SessionConfig, SessionKeys}; use sc_service::ChainType; +use sp_consensus_grandpa::AuthorityId as GrandpaId; use 
sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; use sp_core::{sr25519, Pair, Public}; -use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; // Genesis constants for Sassafras parameters configuration. diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index fad50283d2440..cc8f1a39ec634 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -98,7 +98,7 @@ pub fn run() -> sc_cli::Result<()> { service::new_partial(&config)?; let aux_revert = Box::new(|client, backend, blocks| { sc_consensus_sassafras::revert(backend, blocks)?; - sc_finality_grandpa::revert(client, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; Ok(()) }); Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 4c0edc43c77fa..aea061760b18a 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -2,9 +2,8 @@ use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::BlockBackend; +use sc_consensus_grandpa::SharedVoterState; pub use sc_executor::NativeElseWasmExecutor; -use sc_finality_grandpa::SharedVoterState; -use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use std::{sync::Arc, time::Duration}; @@ -35,7 +34,7 @@ type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = - sc_finality_grandpa::GrandpaBlockImport; + sc_consensus_grandpa::GrandpaBlockImport; pub fn new_partial( config: &Configuration, @@ -54,17 +53,13 @@ pub fn new_partial( FullClient, FullGrandpaBlockImport, >, - sc_finality_grandpa::LinkHalf, + sc_consensus_grandpa::LinkHalf, 
sc_consensus_sassafras::SassafrasLink, ), ), >, ServiceError, > { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())) - } - let telemetry = config .telemetry_endpoints .clone() @@ -106,7 +101,7 @@ pub fn new_partial( client.clone(), ); - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), @@ -116,7 +111,7 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( - sc_consensus_sassafras::configuration(&*client)?, + sc_consensus_sassafras::finalized_configuration(&*client)?, grandpa_block_import, client.clone(), )?; @@ -159,10 +154,6 @@ pub fn new_partial( }) } -fn remote_keystore(_url: &String) -> Result, &'static str> { - Err("Remote Keystore not supported.") -} - /// Builds a new service for a full client. 
pub fn new_full(mut config: Configuration) -> Result { let sc_service::PartialComponents { @@ -170,7 +161,7 @@ pub fn new_full(mut config: Configuration) -> Result backend, mut task_manager, import_queue, - mut keystore_container, + keystore_container, select_chain, transaction_pool, other: (mut telemetry, import_setup), @@ -178,17 +169,7 @@ pub fn new_full(mut config: Configuration) -> Result let (block_import, grandpa_link, sassafras_link) = import_setup; - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))), - }; - } - let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name( + let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), &config.chain_spec, ); @@ -196,14 +177,14 @@ pub fn new_full(mut config: Configuration) -> Result config .network .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone())); - let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + .push(sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone())); + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -243,13 +224,14 @@ pub fn new_full(mut config: Configuration) -> Result sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), - keystore: keystore_container.sync_keystore(), + keystore: 
keystore_container.keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, tx_handler_controller, + sync_service: sync_service.clone(), config, telemetry: telemetry.as_mut(), })?; @@ -267,13 +249,13 @@ pub fn new_full(mut config: Configuration) -> Result let sassafras_params = sc_consensus_sassafras::SassafrasWorkerParams { client: client.clone(), - keystore: keystore_container.sync_keystore(), + keystore: keystore_container.keystore(), select_chain, env: proposer, block_import, sassafras_link, - sync_oracle: network.clone(), - justification_sync_link: network.clone(), + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), force_authoring, create_inherent_data_providers: move |_, _| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); @@ -300,10 +282,9 @@ pub fn new_full(mut config: Configuration) -> Result // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + let keystore = role.is_authority().then(|| keystore_container.keystore()); - let grandpa_config = sc_finality_grandpa::Config { + let grandpa_config = sc_consensus_grandpa::Config { gossip_duration: Duration::from_millis(333), justification_period: 512, name: Some(name), @@ -321,11 +302,12 @@ pub fn new_full(mut config: Configuration) -> Result // and vote data availability than the observer. The observer has not // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. 
- let grandpa_config = sc_finality_grandpa::GrandpaParams { + let grandpa_config = sc_consensus_grandpa::GrandpaParams { config: grandpa_config, link: grandpa_link, network, - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + sync: Arc::new(sync_service), + voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), telemetry: telemetry.as_ref().map(|x| x.handle()), @@ -336,7 +318,7 @@ pub fn new_full(mut config: Configuration) -> Result task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", None, - sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, + sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?, ); } diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index c999f3ceaf78c..9b957df765667 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -28,7 +28,7 @@ use pallet_transaction_payment::CurrencyAdapter; use frame_support::{ construct_runtime, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem}, + traits::{ConstU128, ConstU32, ConstU64, ConstU8}, weights::{ constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, IdentityFee, Weight, @@ -209,17 +209,11 @@ impl pallet_sassafras::Config for Runtime { impl pallet_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type KeyOwnerProofSystem = (); - type KeyOwnerProof = - >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; - type HandleEquivocation = (); type WeightInfo = (); type MaxAuthorities = ConstU32; type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = sp_core::Void; + type EquivocationReportSystem = (); } impl pallet_timestamp::Config for Runtime { @@ -239,6 +233,10 @@ impl pallet_balances::Config for Runtime { type ExistentialDeposit = ConstU128<500>; type AccountStore = System; type WeightInfo = 
pallet_balances::weights::SubstrateWeight; + type FreezeIdentifier = (); + type MaxFreezes = (); + type HoldIdentifier = (); + type MaxHolds = (); } impl pallet_transaction_payment::Config for Runtime { @@ -340,6 +338,14 @@ impl_runtime_apis! { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } } impl sp_block_builder::BlockBuilder for Runtime { diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 533546a8de653..6ffa0c473c855 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -36,7 +36,6 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_consensus_vrf::schnorrkel::VRFOutput; use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 3b6a37d38e18a..2563a657fcc2d 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -250,8 +250,9 @@ impl Epoch { } } -/// Read latest finalized protocol configuration. 
-pub fn configuration(client: &C) -> ClientResult +/// Read protocol configuration from the blockchain state corresponding +/// to the last finalized block +pub fn finalized_configuration(client: &C) -> ClientResult where B: BlockT, C: ProvideRuntimeApi + UsageProvider, diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 0beb1391706c6..f5ee862eb392b 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -238,7 +238,7 @@ impl TestContext { // Note: configuration is loaded using the `TestClient` instance as the runtime-api // provider. In practice this will use the values defined within the test runtime // defined in the `substrate_test_runtime` crate. - let config = crate::configuration(&*client).expect("config available"); + let config = crate::finalized_configuration(&*client).expect("config available"); let (block_import, link) = create_test_block_import(client.clone(), config.clone()); @@ -848,7 +848,7 @@ impl TestNetFactory for SassafrasTestNet { ) { let client = client.as_client(); - let config = crate::configuration(&*client).expect("config available"); + let config = crate::finalized_configuration(&*client).expect("config available"); let (block_import, link) = create_test_block_import(client.clone(), config); (BlockImportAdapter::new(block_import.clone()), None, Some(PeerData { link, block_import })) @@ -859,7 +859,7 @@ impl TestNetFactory for SassafrasTestNet { let data = maybe_link.as_ref().expect("data provided to verifier instantiation"); - let config = crate::configuration(&*client).expect("config available"); + let config = crate::finalized_configuration(&*client).expect("config available"); create_test_verifier(client.clone(), &data.link, config) } From 8d9b2284e5cadae4b95b18a99cafb5bf0981de33 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 28 Mar 2023 11:04:01 +0200 Subject: [PATCH 23/62] Define log target --- 
client/consensus/sassafras/src/authorship.rs | 40 +++++++++++-------- .../consensus/sassafras/src/block_import.rs | 33 ++++++++------- client/consensus/sassafras/src/lib.rs | 14 ++++--- .../consensus/sassafras/src/verification.rs | 25 +++++++----- frame/sassafras/src/lib.rs | 32 +++++++++------ 5 files changed, 83 insertions(+), 61 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 13a5563c608e1..0df19dffaae8e 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -54,14 +54,19 @@ pub(crate) fn claim_slot( let (authority_idx, ticket_aux) = match ticket { Some(ticket) => { - log::debug!(target: "sassafras", "🌳 [TRY PRIMARY]"); + log::debug!(target: LOG_TARGET, "[TRY PRIMARY]"); let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket)?.clone(); - log::debug!(target: "sassafras", "🌳 Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", - &ticket.as_bytes()[0..8], authority_idx, ticket_aux.attempt); + log::debug!( + target: LOG_TARGET, + "Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", + &ticket.as_bytes()[0..8], + authority_idx, + ticket_aux.attempt + ); (authority_idx, Some(ticket_aux)) }, None => { - log::debug!(target: "sassafras", "🌳 [TRY SECONDARY]"); + log::debug!(target: LOG_TARGET, "[TRY SECONDARY]"); (secondary_authority_index(slot, config), None) }, }; @@ -101,7 +106,7 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec, Epoch>; fn logging_target(&self) -> &'static str { - "sassafras" + LOG_TARGET } fn block_import(&mut self) -> &mut Self::BlockImport { @@ -217,13 +222,13 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { - debug!(target: "sassafras", "🌳 Attempting to claim slot {}", slot); + debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot); // Get the next slot ticket from the runtime. 
let ticket = self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; // TODO-SASS-P2: remove me - debug!(target: "sassafras", "🌳 parent {}", parent_header.hash()); + debug!(target: LOG_TARGET, "parent {}", parent_header.hash()); let claim = authorship::claim_slot( slot, @@ -235,7 +240,7 @@ where &self.keystore, ); if claim.is_some() { - debug!(target: "sassafras", "🌳 Claimed slot {}", slot); + debug!(target: LOG_TARGET, "Claimed slot {}", slot); } claim } @@ -251,7 +256,7 @@ where Ok(()) => true, Err(e) => if e.is_full() { - warn!(target: "sassafras", "🌳 Trying to notify a slot but the channel is full"); + warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full"); true } else { false @@ -379,13 +384,13 @@ async fn start_tickets_worker( let epoch_desc = match find_next_epoch_digest::(¬ification.header) { Ok(Some(epoch_desc)) => epoch_desc, Err(err) => { - warn!(target: "sassafras", "🌳 Error fetching next epoch digest: {}", err); + warn!(target: LOG_TARGET, "Error fetching next epoch digest: {}", err); continue }, _ => continue, }; - debug!(target: "sassafras", "🌳 New epoch announced {:x?}", epoch_desc); + debug!(target: LOG_TARGET, "New epoch announced {:x?}", epoch_desc); let number = *notification.header.number(); let position = if number == One::one() { @@ -398,7 +403,10 @@ async fn start_tickets_worker( let mut epoch = match epoch_changes.shared_data().epoch(&epoch_identifier).cloned() { Some(epoch) => epoch, None => { - warn!(target: "🌳 sassafras", "Unexpected missing epoch data for {:?}", epoch_identifier); + warn!( + target: LOG_TARGET, + "Unexpected missing epoch data for {:?}", epoch_identifier + ); continue }, }; @@ -412,7 +420,7 @@ async fn start_tickets_worker( let best_hash = match select_chain.best_chain().await { Ok(header) => header.hash(), Err(err) => { - error!(target: "🌳 sassafras", "Error fetching best chain block id: {}", err); + error!(target: LOG_TARGET, "Error fetching best chain block id: {}", err); 
continue }, }; @@ -434,7 +442,7 @@ async fn start_tickets_worker( // Thus on reboot/crash we are loosing them. }, Some(err) => { - error!(target: "sassafras", "🌳 Unable to submit tickets: {}", err); + error!(target: LOG_TARGET, "Unable to submit tickets: {}", err); }, } } @@ -538,7 +546,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send, ER: std::error::Error + Send + From + From + 'static, { - info!(target: "sassafras", "🌳 🍁 Starting Sassafras Authorship worker"); + info!(target: LOG_TARGET, "🍁 Starting Sassafras Authorship worker"); let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 4b54439be4f6a..d4bd19ee34c0d 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -173,27 +173,30 @@ where *epoch_data.start_slot + skipped_epochs * epoch_data.config.epoch_duration, ); log::warn!( - target: "sassafras", - "🌳 Epoch(s) skipped from {} to {}", - original_epoch_idx, epoch_data.epoch_idx + target: LOG_TARGET, + "Epoch(s) skipped from {} to {}", + original_epoch_idx, + epoch_data.epoch_idx ); } - log!(target: "sassafras", - log_level, - "🌳 🍁 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_idx, - hash, - slot, - viable_epoch.as_ref().start_slot, + log!( + target: LOG_TARGET, + log_level, + "New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_idx, + hash, + slot, + viable_epoch.as_ref().start_slot, ); let next_epoch = viable_epoch.increment(next_epoch_desc); - log!(target: "sassafras", - log_level, - "🌳 🍁 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, + log!( + target: LOG_TARGET, + log_level, + "Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, ); let old_epoch_changes = (*epoch_changes).clone(); @@ -218,7 +221,7 @@ where }; if let 
Err(e) = prune_and_import() { - warn!(target: "sassafras", "🌳 Failed to launch next epoch: {}", e); + warn!(target: LOG_TARGET, "Failed to launch next epoch: {}", e); *epoch_changes = old_epoch_changes; return Err(e) } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2563a657fcc2d..d94187f8ad140 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -98,6 +98,8 @@ pub use aux_schema::revert; pub use block_import::{block_import, SassafrasBlockImport}; pub use verification::SassafrasVerifier; +const LOG_TARGET: &str = "sassafras 🌳"; + /// Errors encountered by the Sassafras routines. #[derive(Debug, thiserror::Error)] pub enum Error { @@ -180,7 +182,7 @@ impl From> for String { // Convenience function fn sassafras_err(error: Error) -> Error { - error!(target: "sassafras", "🌳 {}", error); + error!(target: LOG_TARGET, "{}", error); error } @@ -260,7 +262,7 @@ where { let info = client.usage_info().chain; let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| { - debug!(target: "sassafras", "🌳 Reading config from genesis"); + debug!(target: LOG_TARGET, "Reading config from genesis"); info.genesis_hash }); @@ -297,10 +299,10 @@ fn find_pre_digest(header: &B::Header) -> Result> let mut pre_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "sassafras", "🌳 Checking log {:?}, looking for pre runtime digest", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log); match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { (Some(_), true) => return Err(sassafras_err(Error::MultiplePreRuntimeDigests)), - (None, _) => trace!(target: "sassafras", "🌳 Ignoring digest not meant for us"), + (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), (s, false) => pre_digest = s, } } @@ -313,13 +315,13 @@ fn find_next_epoch_digest( ) -> Result, Error> { let mut epoch_digest: Option<_> = None; 
for log in header.digest().logs() { - trace!(target: "sassafras", "🌳 Checking log {:?}, looking for epoch change digest.", log); + trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); match (log, epoch_digest.is_some()) { (Some(ConsensusLog::NextEpochData(_)), true) => return Err(sassafras_err(Error::MultipleEpochChangeDigests)), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), - _ => trace!(target: "sassafras", "🌳 Ignoring digest not meant for us"), + _ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), } } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 4ede6b7bb7d96..a1f465975a3f8 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -92,7 +92,7 @@ fn check_header( match (&ticket, &pre_digest.ticket_aux) { (Some(ticket), Some(ticket_aux)) => { - log::debug!(target: "sassafras", "🌳 checking primary"); + log::debug!(target: LOG_TARGET, "checking primary"); let transcript = make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_idx); schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) @@ -100,20 +100,20 @@ fn check_header( .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; }, (None, None) => { - log::debug!(target: "sassafras", "🌳 checking secondary"); + log::debug!(target: LOG_TARGET, "checking secondary"); let idx = authorship::secondary_authority_index(pre_digest.slot, config); if idx != pre_digest.authority_idx { - log::error!(target: "sassafras", "🌳 Bad secondary authority index"); + log::error!(target: LOG_TARGET, "Bad secondary authority index"); return Err(Error::SlotAuthorNotFound) } }, (Some(_), None) => { - log::warn!(target: "sassafras", "🌳 Unexpected secondary authoring mechanism"); + log::warn!(target: LOG_TARGET, "Unexpected 
secondary authoring mechanism"); return Err(Error::UnexpectedAuthoringMechanism) }, (None, Some(_)) => if origin != BlockOrigin::NetworkInitialSync { - log::warn!(target: "sassafras", "🌳 Unexpected primary authoring mechanism"); + log::warn!(target: LOG_TARGET, "Unexpected primary authoring mechanism"); return Err(Error::UnexpectedAuthoringMechanism) }, } @@ -223,7 +223,7 @@ where }; info!( - target: "sassafras", + target: LOG_TARGET, "🌳 Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", author, slot, @@ -275,7 +275,7 @@ where ) .map_err(Error::RuntimeApi)?; - info!(target: "sassafras", "🌳 Submitted equivocation report for author {:?}", author); + info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author); Ok(()) } @@ -302,7 +302,7 @@ where mut block: BlockImportParams, ) -> Result, String> { trace!( - target: "sassafras", + target: LOG_TARGET, "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", block.origin, block.header, @@ -389,7 +389,10 @@ where ) .await { - warn!(target: "sassafras", "🌳 Error checking/reporting Sassafras equivocation: {}", err); + warn!( + target: LOG_TARGET, + "Error checking/reporting Sassafras equivocation: {}", err + ); } // If the body is passed through, we need to use the runtime to check that the @@ -419,7 +422,7 @@ where block.body = Some(inner_body); } - trace!(target: "sassafras", "🌳 Checked {:?}; importing.", pre_header); + trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); telemetry!( self.telemetry; CONSENSUS_TRACE; @@ -438,7 +441,7 @@ where Ok(block) }, CheckedHeader::Deferred(a, b) => { - debug!(target: "sassafras", "🌳 Checking {:?} failed; {:?}, {:?}.", hash, a, b); + debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b); telemetry!( self.telemetry; CONSENSUS_DEBUG; diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 36bce7d08957d..3544e1679a008 100644 --- a/frame/sassafras/src/lib.rs +++ 
b/frame/sassafras/src/lib.rs @@ -80,6 +80,8 @@ pub mod session; // Re-export pallet symbols. pub use pallet::*; +const LOG_TARGET: &str = "runtime::sassafras 🌳"; + /// Tickets related metadata that is commonly used together. #[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] pub struct TicketsMetadata { @@ -272,7 +274,7 @@ pub mod pallet { // On the first non-zero block (i.e. block #1) this is where the first epoch // (epoch #0) actually starts. We need to adjust internal storage accordingly. if *GenesisSlot::::get() == 0 { - log::debug!(target: "sassafras", "🌳 >>> GENESIS SLOT: {:?}", pre_digest.slot); + log::debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", pre_digest.slot); Self::initialize_genesis_epoch(pre_digest.slot) } @@ -335,7 +337,7 @@ pub mod pallet { let mut metadata = TicketsMeta::::get(); - log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ received {} tickets", tickets.len()); + log::debug!(target: LOG_TARGET, "@@@@@@@@@@ received {} tickets", tickets.len()); // TODO-SASS-P4: for sure there is a better way to do this... let tickets: Vec<_> = tickets.iter().map(|t| t.ticket).collect(); @@ -408,7 +410,9 @@ pub mod pallet { fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_tickets { tickets } = call { // Discard tickets not coming from the local node - log::debug!(target: "sassafras::runtime", "🌳 Validating unsigned from {} source", + log::debug!( + target: LOG_TARGET, + "Validating unsigned from {} source", match source { TransactionSource::Local => "local", TransactionSource::InBlock => "in-block", @@ -427,8 +431,8 @@ pub mod pallet { // B) The next epoch validators // C) Doesn't matter as far as the tickets are good (i.e. 
RVRF verify is ok) log::warn!( - target: "sassafras::runtime", - "🌳 Rejecting unsigned transaction from external sources.", + target: LOG_TARGET, + "Rejecting unsigned transaction from external sources.", ); return InvalidTransaction::BadSigner.into() } @@ -438,10 +442,7 @@ pub mod pallet { let current_slot_idx = Self::current_slot_index(); if current_slot_idx >= epoch_duration / 2 { - log::warn!( - target: "sassafras::runtime", - "🌳 Timeout to propose tickets, bailing out.", - ); + log::warn!(target: LOG_TARGET, "Timeout to propose tickets, bailing out.",); return InvalidTransaction::Stale.into() } @@ -726,7 +727,12 @@ impl Pallet { } else { 2 * (duration - (slot_idx + 1)) }; - log::debug!(target: "sassafras::runtime", "🌳 >>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, ticket_idx); + log::debug!( + target: LOG_TARGET, + ">>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", + slot_idx, + ticket_idx + ); ticket_idx as u32 }; @@ -820,14 +826,14 @@ impl Pallet { /// /// TODO-SASS-P3: we have to add the zk validity proofs pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { - log::debug!(target: "sassafras", "🌳 @@@@@@@@@@ submitting {} tickets", tickets.len()); + log::debug!(target: LOG_TARGET, "@@@@@@@@@@ submitting {} tickets", tickets.len()); tickets.sort_unstable_by_key(|t| t.ticket); let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(_) => true, Err(e) => { - log::error!(target: "runtime::sassafras", "Error submitting tickets {:?}", e); + log::error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); false }, } @@ -848,7 +854,7 @@ impl Pallet { match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(()) => true, Err(e) => { - log::error!(target: "runtime::sassafras", "Error submitting equivocation report: {:?}", e); + log::error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e); false }, } From 
fff422429de4e71fc251ca5fbe9e0b5b748a52c8 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 3 May 2023 19:24:41 +0200 Subject: [PATCH 24/62] Introduce better ticket related structures --- Cargo.lock | 4 - client/consensus/sassafras/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 98 +++++----- .../consensus/sassafras/src/block_import.rs | 2 +- client/consensus/sassafras/src/lib.rs | 59 +++--- .../consensus/sassafras/src/verification.rs | 70 +++---- frame/sassafras/src/lib.rs | 151 +++++++++------ frame/sassafras/src/mock.rs | 47 ++--- primitives/consensus/sassafras/Cargo.toml | 2 - primitives/consensus/sassafras/src/digests.rs | 15 +- primitives/consensus/sassafras/src/lib.rs | 181 ++++++++++++------ primitives/consensus/sassafras/src/vrf.rs | 92 --------- 12 files changed, 367 insertions(+), 358 deletions(-) delete mode 100644 primitives/consensus/sassafras/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index 918d412e0db37..6573f124e255e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9036,7 +9036,6 @@ dependencies = [ "sc-keystore", "sc-network-test", "sc-telemetry", - "schnorrkel", "sp-api", "sp-application-crypto", "sp-block-builder", @@ -9044,7 +9043,6 @@ dependencies = [ "sp-consensus", "sp-consensus-sassafras", "sp-consensus-slots", - "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-keyring", @@ -9054,7 +9052,6 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", - "tokio", ] [[package]] @@ -10626,7 +10623,6 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-consensus-slots", - "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-keystore", diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index f280841fd266f..964c0101d315c 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -22,7 +22,6 @@ parking_lot = "0.12.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } 
prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } -schnorrkel = "0.9.1" sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } @@ -35,7 +34,6 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } @@ -48,4 +46,4 @@ sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -tokio = "1.22.0" \ No newline at end of file +#tokio = "1.22.0" \ No newline at end of file diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 0df19dffaae8e..9a80b3f47ea7f 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -20,10 +20,10 @@ use super::*; +use sp_application_crypto::Wraps; use sp_consensus_sassafras::{ - digests::PreDigest, - vrf::{make_slot_transcript_data, make_ticket_transcript_data}, - AuthorityId, Slot, Ticket, TicketAux, TicketEnvelope, + 
digests::PreDigest, make_slot_vrf_transcript, make_ticket_vrf_transcript, AuthorityId, Slot, + TicketClaim, TicketData, TicketEnvelope, TicketId, }; use sp_core::{twox_64, ByteArray}; @@ -43,7 +43,7 @@ pub(crate) fn secondary_authority_index( pub(crate) fn claim_slot( slot: Slot, epoch: &Epoch, - ticket: Option, + maybe_ticket: Option<(TicketId, TicketData)>, keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { let config = &epoch.config; @@ -52,18 +52,22 @@ pub(crate) fn claim_slot( return None } - let (authority_idx, ticket_aux) = match ticket { - Some(ticket) => { + let (authority_idx, ticket_claim) = match maybe_ticket { + Some((ticket_id, ticket_data)) => { log::debug!(target: LOG_TARGET, "[TRY PRIMARY]"); - let (authority_idx, ticket_aux) = epoch.tickets_aux.get(&ticket)?.clone(); + let (authority_idx, ticket_secret) = epoch.tickets_aux.get(&ticket_id)?.clone(); log::debug!( target: LOG_TARGET, - "Ticket = [ticket: {:02x?}, auth: {}, attempt: {}]", - &ticket.as_bytes()[0..8], + "Ticket = [ticket: {:x?}, auth: {}, attempt: {}]", + ticket_id, authority_idx, - ticket_aux.attempt + ticket_data.attempt_idx ); - (authority_idx, Some(ticket_aux)) + // TODO DAVXY : using ticket_secret + let _ = ticket_secret; + let erased_signature = [0; 64]; + let claim = TicketClaim { erased_signature }; + (authority_idx, Some(claim)) }, None => { log::debug!(target: LOG_TARGET, "[TRY SECONDARY]"); @@ -73,19 +77,13 @@ pub(crate) fn claim_slot( let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; - let transcript_data = make_slot_transcript_data(&config.randomness, slot, epoch.epoch_idx); - let signature = keystore - .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), transcript_data) + let transcript = make_slot_vrf_transcript(&config.randomness, slot, epoch.epoch_idx); + let vrf_signature = keystore + .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &transcript) .ok() .flatten()?; - let pre_digest = PreDigest { - 
authority_idx, - slot, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof), - ticket_aux, - }; + let pre_digest = PreDigest { authority_idx, slot, vrf_signature, ticket_claim }; Some((pre_digest, authority_id.clone())) } @@ -97,16 +95,16 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec Vec(b"context", &transcript, &signature.output) + .map(|bytes| u128::from_le_bytes(bytes)) + .ok()?; + + if ticket_id >= threshold { return None } - let envelope = TicketEnvelope { - ticket, - // TODO-SASS-P3: placeholder... - zk_proof: VRFProof::try_from([0; 64]).expect("FIXME"), - }; - let ticket_aux = - TicketAux { attempt: attempt as u32, proof: VRFProof(signature.proof) }; + // TODO DAVXY: compute proper erased_secret/public and revealed_public + let erased_secret = [0; 32]; + let erased_public = [0; 32]; + let revealed_public = [0; 32]; + let ticket_data = TicketData { attempt_idx, erased_public, revealed_public }; + + // TODO DAVXY: placeholder + let ring_proof = (); + let ticket_envelope = + TicketEnvelope { data: ticket_data, vrf_preout: signature.output, ring_proof }; + + let ticket_secret = TicketSecret { attempt_idx, erased_secret }; - Some((envelope, ticket_aux)) + Some((ticket_envelope, ticket_id, ticket_secret)) }; for attempt in 0..max_attempts { - if let Some((envelope, ticket_aux)) = make_ticket(attempt) { + if let Some((envelope, ticket_id, ticket_secret)) = make_ticket(attempt) { epoch .tickets_aux - .insert(envelope.ticket, (authority_idx as AuthorityIndex, ticket_aux)); + .insert(ticket_id, (authority_idx as AuthorityIndex, ticket_secret)); tickets.push(envelope); } } @@ -225,7 +234,8 @@ where debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot); // Get the next slot ticket from the runtime. 
- let ticket = self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; + let maybe_ticket = + self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; // TODO-SASS-P2: remove me debug!(target: LOG_TARGET, "parent {}", parent_header.hash()); @@ -236,7 +246,7 @@ where .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))? .as_ref(), - ticket, + maybe_ticket, &self.keystore, ); if claim.is_some() { diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index d4bd19ee34c0d..6bc3b93595801 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -412,7 +412,7 @@ where ) })?; - let total_weight = parent_weight + pre_digest.ticket_aux.is_some() as u32; + let total_weight = parent_weight + pre_digest.ticket_claim.is_some() as u32; aux_schema::write_block_weight(hash, total_weight, |values| { block diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index d94187f8ad140..4ab862ddb5f1b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -40,7 +40,6 @@ use log::{debug, error, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; use scale_codec::{Decode, Encode}; -use schnorrkel::SignatureError; use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; use sc_consensus::{ @@ -65,7 +64,7 @@ use sp_consensus::{ BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; use sp_consensus_slots::Slot; -use sp_core::{crypto::ByteArray, ExecutionContext, Pair}; +use sp_core::{ExecutionContext, Pair}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -78,11 +77,10 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ 
digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, - vrf::{make_slot_transcript, make_ticket_transcript}, - AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, SassafrasApi, - SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, Ticket, - TicketAux, TicketEnvelope, VRFOutput, VRFProof, SASSAFRAS_ENGINE_ID, VRF_OUTPUT_LENGTH, - VRF_PROOF_LENGTH, + make_slot_vrf_transcript, make_ticket_vrf_transcript, AuthorityId, AuthorityIndex, + AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, + SassafrasConfiguration, SassafrasEpochConfiguration, TicketClaim, TicketData, TicketEnvelope, + TicketId, TicketSecret, SASSAFRAS_ENGINE_ID, }; mod authorship; @@ -100,6 +98,9 @@ pub use verification::SassafrasVerifier; const LOG_TARGET: &str = "sassafras 🌳"; +/// Intermediate key for Babe engine. +pub const INTERMEDIATE_KEY: &[u8] = b"sass1"; + /// Errors encountered by the Sassafras routines. #[derive(Debug, thiserror::Error)] pub enum Error { @@ -137,8 +138,8 @@ pub enum Error { #[error("Bad signature on {0:?}")] BadSignature(B::Hash), /// VRF verification failed - #[error("VRF verification failed: {0:?}")] - VRFVerificationFailed(SignatureError), + #[error("VRF verification failed")] + VrfVerificationFailed, /// Unexpected authoring mechanism #[error("Unexpected authoring mechanism")] UnexpectedAuthoringMechanism, @@ -180,10 +181,10 @@ impl From> for String { } } -// Convenience function -fn sassafras_err(error: Error) -> Error { - error!(target: LOG_TARGET, "{}", error); - error +// Convenience function for error logging +fn sassafras_err(err: Error) -> Error { + error!(target: LOG_TARGET, "{}", err); + err } /// Sassafras epoch information augmented with private tickets information. @@ -195,8 +196,8 @@ pub struct Epoch { pub start_slot: Slot, /// Epoch configuration. pub config: SassafrasConfiguration, - /// Tickets auxiliary data. 
- pub tickets_aux: BTreeMap, + /// Tickets associated secret data. + pub tickets_aux: BTreeMap, } impl From for Epoch { @@ -276,25 +277,19 @@ pub struct SassafrasIntermediate { pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } -/// Intermediate key for Babe engine. -pub static INTERMEDIATE_KEY: &[u8] = b"sass1"; - -/// Extract the Sassafras pre digest from the given header. Pre-runtime digests are -/// mandatory, the function will return `Err` if none is found. +/// Extract the Sassafras pre digest from the given header. +/// +/// Pre-runtime digests are mandatory, the function will return `Err` if none is found. fn find_pre_digest(header: &B::Header) -> Result> { - // Genesis block doesn't contain a pre digest so let's generate a - // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { - const PROOF: &str = "zero sequence is a valid vrf output/proof; qed"; - let vrf_output = VRFOutput::try_from([0; VRF_OUTPUT_LENGTH]).expect(PROOF); - let vrf_proof = VRFProof::try_from([0; VRF_PROOF_LENGTH]).expect(PROOF); - return Ok(PreDigest { - authority_idx: 0, - slot: 0.into(), - vrf_output, - vrf_proof, - ticket_aux: None, - }) + // Genesis block doesn't contain a pre digest so let's generate a + // dummy one to not break any invariants in the rest of the code + use sp_consensus_sassafras::VrfTranscript; + use sp_core::crypto::VrfSigner; + let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); + let transcript = VrfTranscript::new(b"", &[]); + let vrf_signature = pair.as_ref().vrf_sign(&transcript); + return Ok(PreDigest { authority_idx: 0, slot: 0.into(), ticket_claim: None, vrf_signature }) } let mut pre_digest: Option<_> = None; diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index a1f465975a3f8..b9ac0ac2694fd 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -18,6 +18,8 @@ //! 
Types and functions related to block verification. +use sp_application_crypto::Wraps; + use super::*; // Allowed slot drift. @@ -36,15 +38,15 @@ struct VerificationParams<'a, B: 'a + BlockT> { /// Origin origin: BlockOrigin, /// Expected ticket for this block. - ticket: Option, + maybe_ticket: Option<(TicketId, TicketData)>, } /// Verified information struct VerifiedHeaderInfo { /// Authority index. authority_id: AuthorityId, - /// Seal found within the header. - seal: DigestItem, + // /// Seal found within the header. + // seal: DigestItem, } /// Check a header has been signed by the right key. If the slot is too far in @@ -59,7 +61,7 @@ struct VerifiedHeaderInfo { fn check_header( params: VerificationParams, ) -> Result, Error> { - let VerificationParams { mut header, pre_digest, slot_now, epoch, origin, ticket } = params; + let VerificationParams { header, pre_digest, slot_now, epoch, origin, maybe_ticket } = params; let config = &epoch.config; // Check that the slot is not in the future, with some drift being allowed. 
@@ -74,30 +76,40 @@ fn check_header( // Check header signature - let seal = header - .digest_mut() - .pop() - .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + // Check slot-vrf proof - let signature = seal - .as_sassafras_seal() - .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; + // TODO DAVXY: probably there is not need to also add an explicit `Seal` + // it would be just redundant and we can just push the block header hash within + // the slot-vrf-transcript - let pre_hash = header.hash(); - if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { - return Err(sassafras_err(Error::BadSignature(pre_hash))) + use sp_core::crypto::VrfVerifier; + let transcript = make_slot_vrf_transcript(&config.randomness, pre_digest.slot, epoch.epoch_idx); + if !authority_id.as_inner_ref().vrf_verify(&transcript, &pre_digest.vrf_signature) { + return Err(sassafras_err(Error::VrfVerificationFailed)) } + // let seal = header + // .digest_mut() + // .pop() + // .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + + // let signature = seal + // .as_sassafras_seal() + // .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; + + // let pre_hash = header.hash(); + // if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { + // return Err(sassafras_err(Error::BadSignature(pre_hash))) + // } + // Check authorship method and claim - match (&ticket, &pre_digest.ticket_aux) { - (Some(ticket), Some(ticket_aux)) => { + match (&maybe_ticket, &pre_digest.ticket_claim) { + (Some((_ticket_id, ticket_data)), Some(ticket_claim)) => { log::debug!(target: LOG_TARGET, "checking primary"); - let transcript = - make_ticket_transcript(&config.randomness, ticket_aux.attempt, epoch.epoch_idx); - schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &ticket, &ticket_aux.proof)) - .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; + // TODO DAVXY: check 
erased_signature + let _public = ticket_data.erased_public; + let _signature = ticket_claim.erased_signature; }, (None, None) => { log::debug!(target: LOG_TARGET, "checking secondary"); @@ -118,14 +130,7 @@ fn check_header( }, } - // Check slot-vrf proof - - let transcript = make_slot_transcript(&config.randomness, pre_digest.slot, epoch.epoch_idx); - schnorrkel::PublicKey::from_bytes(authority_id.as_slice()) - .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) - .map_err(|s| sassafras_err(Error::VRFVerificationFailed(s)))?; - - let info = VerifiedHeaderInfo { authority_id, seal }; + let info = VerifiedHeaderInfo { authority_id }; Ok(CheckedHeader::Checked(header, info)) } @@ -354,7 +359,7 @@ where .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) .ok_or(Error::::FetchEpoch(parent_hash))?; - let ticket = self + let maybe_ticket = self .client .runtime_api() .slot_ticket(parent_hash, pre_digest.slot) @@ -367,7 +372,7 @@ where slot_now, epoch: viable_epoch.as_ref(), origin: block.origin, - ticket, + maybe_ticket, }; let checked_header = check_header::(verification_params)?; @@ -432,7 +437,8 @@ where block.header = pre_header; block.post_hash = Some(hash); - block.post_digests.push(verified_info.seal); + // TODO DAVXY: seal required??? + // block.post_digests.push(verified_info.seal); block.insert_intermediate( INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }, diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 3544e1679a008..f1e0e131bdcee 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -43,7 +43,7 @@ //! To anonymously publish the ticket to the chain a validator sends their tickets //! to a random validator who later puts it on-chain as a transaction. 
-#![deny(warnings)] +// #![deny(warnings)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] @@ -55,8 +55,8 @@ use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, - SassafrasConfiguration, SassafrasEpochConfiguration, Slot, Ticket, TicketEnvelope, - SASSAFRAS_ENGINE_ID, + SassafrasConfiguration, SassafrasEpochConfiguration, Slot, TicketData, TicketEnvelope, + TicketId, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -213,18 +213,24 @@ pub mod pallet { #[pallet::storage] pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; - /// Tickets to be used for current and next epoch. + /// Tickets identifiers. /// The key is a tuple composed by: /// - `u8` equal to epoch-index mod 2 /// - `u32` equal to the slot-index. #[pallet::storage] - pub type Tickets = StorageMap<_, Identity, (u8, u32), Ticket>; + pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; + + /// Tickets to be used for current and next epoch. + #[pallet::storage] + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketData, ValueQuery>; /// Next epoch tickets temporary accumulator. /// Special `u32::MAX` key is reserved for partially sorted segment. + // This bound is set as `MaxTickets` in the unlucky case where we receive one Ticket at a time. + // The max capacity is thus MaxTickets^2. Not much, given that we save TicketIds here. #[pallet::storage] pub type NextTicketsSegments = - StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; + StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; /// Genesis configuration for Sassafras protocol. 
#[cfg_attr(feature = "std", derive(Default))] @@ -295,12 +301,11 @@ pub mod pallet { let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - // TODO-SASS-P3: apparently this is not 100% ok - // `vrf_output` should be processed using `attach_input_hash(&pubkey, transcript)` - Self::deposit_randomness(pre_digest.vrf_output.as_bytes()); + // TODO-SASS-P3: use make-bytes!!! + // let bytes = .... ; for the moment we just use the pre-output + Self::deposit_randomness(pre_digest.vrf_signature.output.0.as_bytes()); - // If we are in the second half of the epoch, we can start sorting the next epoch - // tickets. + // If we are in the epoch's second half, we start sorting the next epoch tickets. let epoch_duration = T::EpochDuration::get(); let current_slot_idx = Self::slot_index(pre_digest.slot); if current_slot_idx >= epoch_duration / 2 { @@ -308,15 +313,13 @@ pub mod pallet { if metadata.segments_count != 0 { let epoch_idx = EpochIndex::::get() + 1; let epoch_tag = (epoch_idx & 1) as u8; - if metadata.segments_count != 0 { - let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); - Self::sort_tickets( - u32::max(1, metadata.segments_count / slots_left as u32), - epoch_tag, - &mut metadata, - ); - TicketsMeta::::set(metadata); - } + let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); + Self::sort_tickets( + u32::max(1, metadata.segments_count / slots_left as u32), + epoch_tag, + &mut metadata, + ); + TicketsMeta::::set(metadata); } } } @@ -328,7 +331,7 @@ pub mod pallet { /// /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? #[pallet::call_index(0)] - #[pallet::weight(10_000)] + #[pallet::weight({0})] pub fn submit_tickets( origin: OriginFor, tickets: BoundedVec, @@ -339,13 +342,38 @@ pub mod pallet { log::debug!(target: LOG_TARGET, "@@@@@@@@@@ received {} tickets", tickets.len()); - // TODO-SASS-P4: for sure there is a better way to do this... 
- let tickets: Vec<_> = tickets.iter().map(|t| t.ticket).collect(); - let tickets = BoundedVec::<_, T::MaxTickets>::try_from(tickets).expect("TODO"); + // Check tickets score + let next_auth = NextAuthorities::::get(); + let epoch_config = EpochConfig::::get(); + // Current slot should be less than half of epoch duration. + let epoch_duration = T::EpochDuration::get(); + let threshold = sp_consensus_sassafras::compute_ticket_id_threshold( + epoch_config.redundancy_factor, + epoch_duration as u32, + epoch_config.attempts_number, + next_auth.len() as u32, + ); + + let epoch_idx = EpochIndex::::get(); + let randomness = CurrentRandomness::::get(); - // We just require a unique key to save the partial tickets list. + let mut segment = BoundedVec::with_max_capacity(); + for ticket in tickets.iter() { + let input = sp_consensus_sassafras::make_ticket_vrf_transcript( + &randomness, + ticket.data.attempt_idx, + epoch_idx, + ); + let id = sp_consensus_sassafras::make_ticket_value(&input, &ticket.vrf_preout); + if id < threshold { + TicketsData::::set(id, ticket.data.clone()); + segment.try_push(id).expect("has same length as bounded input vector; qed"); + } + } + + // We just require a unique key to save the tickets ids segment. 
metadata.segments_count += 1; - NextTicketsSegments::::insert(metadata.segments_count, tickets); + NextTicketsSegments::::insert(metadata.segments_count, segment); TicketsMeta::::set(metadata); Ok(()) } @@ -360,7 +388,7 @@ pub mod pallet { /// /// TODO-SASS-P4: proper weight #[pallet::call_index(1)] - #[pallet::weight(10_000)] + #[pallet::weight({0})] pub fn plan_config_change( origin: OriginFor, config: SassafrasEpochConfiguration, @@ -386,7 +414,7 @@ pub mod pallet { /// /// TODO-SASS-P4: proper weight #[pallet::call_index(2)] - #[pallet::weight(10_000)] + #[pallet::weight({0})] pub fn report_equivocation_unsigned( origin: OriginFor, _equivocation_proof: EquivocationProof, @@ -446,22 +474,30 @@ pub mod pallet { return InvalidTransaction::Stale.into() } - // Check tickets are below threshold - let next_auth = NextAuthorities::::get(); - let epoch_config = EpochConfig::::get(); - let threshold = sp_consensus_sassafras::compute_threshold( - epoch_config.redundancy_factor, - epoch_duration as u32, - epoch_config.attempts_number, - next_auth.len() as u32, - ); - if !tickets - .iter() - .all(|t| sp_consensus_sassafras::check_threshold(&t.ticket, threshold)) - { - // TODO-SASS-P3: also check ZK proof to assert origin validity - return InvalidTransaction::Custom(0).into() - } + // // Check tickets score + // let next_auth = NextAuthorities::::get(); + // let epoch_config = EpochConfig::::get(); + + // TODO DAVXY + // If we insert the pre-computed id within the body then we can: + // 1. check for equality (not strictly required as far as the output is < threshold) + // 2. avoid recompute it in the submit call that will follow... + // Unfortunatelly here we can't discard a subset of the tickets... 
+ // so we have to decide if we want to discard the whole set in presence of "bad + // apples" + // let threshold = sp_consensus_sassafras::compute_ticket_id_threshold( + // epoch_config.redundancy_factor, + // epoch_duration as u32, + // epoch_config.attempts_number, + // next_auth.len() as u32, + // ); + // for ticket in tickets { + // let _preout = ticket.vrf_preout.clone(); + // // TODO DAVXY: here we have to call vrf preout.make_bytes()... + // // Available with thin-vrf. Not available with plain schnorrkel without public + // // key. For now, just set as the preout + // // Check score... + // } // This should be set such that it is discarded after the first epoch half // TODO-SASS-P3: double check this. Should we then check again in the extrinsic @@ -715,7 +751,7 @@ impl Pallet { /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), /// or if the slot falls beyond the next epoch. - pub fn slot_ticket(slot: Slot) -> Option { + pub fn slot_ticket_id(slot: Slot) -> Option { let epoch_idx = EpochIndex::::get(); let duration = T::EpochDuration::get(); let mut slot_idx = Self::slot_index(slot); @@ -753,12 +789,20 @@ impl Pallet { let ticket_idx = get_ticket_idx(slot_idx); if ticket_idx < tickets_meta.tickets_count[epoch_tag as usize] { - Tickets::::get((epoch_tag, ticket_idx)) + TicketsIds::::get((epoch_tag, ticket_idx)) } else { None } } + /// Returns ticket data associated to the given `slot`. + /// + /// Refer to the `slot_ticket_id` documentation for the slot-ticket association + /// criteria. + pub fn slot_ticket(slot: Slot) -> Option { + Self::slot_ticket_id(slot).map(|id| TicketsData::::get(id)) + } + // Lexicographically sort the tickets who belongs to the next epoch. 
// // Tickets are fetched from at most `max_iter` segments received via the `submit_tickets` @@ -780,7 +824,7 @@ impl Pallet { let mut sup = if new_segment.len() >= max_tickets { new_segment[new_segment.len() - 1] } else { - Ticket::try_from([0xFF; 32]).expect("This is a valid vrf output value; qed") + TicketId::MAX }; // Consume at most `max_iter` segments. @@ -791,6 +835,7 @@ impl Pallet { if new_segment.len() > max_tickets { require_sort = false; new_segment.sort_unstable(); + new_segment[max_tickets..].iter().for_each(|id| TicketsData::::remove(id)); new_segment.truncate(max_tickets); sup = new_segment[max_tickets - 1]; } @@ -805,8 +850,8 @@ impl Pallet { if segments_count == 0 { // Sort is over, write to next epoch map. // TODO-SASS-P3: is there a better way to write a map from a vector? - new_segment.iter().enumerate().for_each(|(i, t)| { - Tickets::::insert((epoch_tag, i as u32), t); + new_segment.iter().enumerate().for_each(|(i, id)| { + TicketsIds::::insert((epoch_tag, i as u32), id); }); metadata.tickets_count[epoch_tag as usize] = new_segment.len() as u32; } else { @@ -814,7 +859,7 @@ impl Pallet { NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); } - metadata.segments_count = segments_count; + // metadata.segments_count = segments_count; } /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to @@ -824,10 +869,8 @@ impl Pallet { /// is called within the first half of the epoch. That is, tickets received within the /// second half are dropped. /// - /// TODO-SASS-P3: we have to add the zk validity proofs - pub fn submit_tickets_unsigned_extrinsic(mut tickets: Vec) -> bool { - log::debug!(target: LOG_TARGET, "@@@@@@@@@@ submitting {} tickets", tickets.len()); - tickets.sort_unstable_by_key(|t| t.ticket); + /// TODO-SASS-P3: use pass a bounded vector??? 
+ pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index d9c0e6edfc743..51b76cc77cc9c 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -22,12 +22,11 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever}; use frame_support::traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, - vrf::{self, VRFOutput, VRFProof}, - AuthorityIndex, AuthorityPair, Slot, TicketEnvelope, + digests::PreDigest, AuthorityIndex, AuthorityPair, Slot, TicketData, TicketEnvelope, + VrfSignature, }; use sp_core::{ - crypto::{IsWrappedBy, Pair}, + crypto::{Pair, VrfSigner}, H256, U256, }; use sp_runtime::{ @@ -135,9 +134,7 @@ pub fn new_test_ext_with_pairs( (pairs, storage.into()) } -fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { - let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); - +fn make_ticket(slot: Slot, attempt: u32, pair: &AuthorityPair) -> TicketEnvelope { let mut epoch = Sassafras::epoch_index(); let mut randomness = Sassafras::randomness(); @@ -148,12 +145,17 @@ fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput randomness = crate::NextRandomness::::get(); } - let transcript = vrf::make_ticket_transcript(&randomness, attempt, epoch); - let inout = pair.vrf_sign(transcript); - let output = VRFOutput(inout.0.to_output()); - let proof = VRFProof(inout.1); + let transcript = + sp_consensus_sassafras::make_ticket_vrf_transcript(&randomness, attempt, epoch); + + // TODO DAVXY: NOT REQUIRED ONCE WE HAVE THE NEW API... + // (i.e. 
we just require the preout) + let signature = pair.as_ref().vrf_sign(&transcript); - (output, proof) + // TODO DAVXY: use some well known valid test keys... + let data = + TicketData { attempt_idx: attempt, erased_public: [0; 32], revealed_public: [0; 32] }; + TicketEnvelope { data, vrf_preout: signature.output, ring_proof: () } } /// Construct at most `attempts` tickets for the given `slot`. @@ -162,16 +164,11 @@ fn make_ticket_vrf(slot: Slot, attempt: u32, pair: &AuthorityPair) -> (VRFOutput pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec { (0..attempts) .into_iter() - .map(|attempt| { - let (ticket, zk_proof) = make_ticket_vrf(slot, attempt, pair); - TicketEnvelope { ticket, zk_proof } - }) + .map(|attempt| make_ticket(slot, attempt, pair)) .collect() } -fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { - let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); - +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { let mut epoch = Sassafras::epoch_index(); let mut randomness = Sassafras::randomness(); @@ -182,12 +179,8 @@ fn make_slot_vrf(slot: Slot, pair: &AuthorityPair) -> (VRFOutput, VRFProof) { randomness = crate::NextRandomness::::get(); } - let transcript = vrf::make_slot_transcript(&randomness, slot, epoch); - let inout = pair.vrf_sign(transcript); - let output = VRFOutput(inout.0.to_output()); - let proof = VRFProof(inout.1); - - (output, proof) + let transcript = sp_consensus_sassafras::make_slot_vrf_transcript(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&transcript) } /// Produce a `PreDigest` instance for the given parameters. 
@@ -196,8 +189,8 @@ pub fn make_pre_digest( slot: Slot, pair: &AuthorityPair, ) -> PreDigest { - let (vrf_output, vrf_proof) = make_slot_vrf(slot, pair); - PreDigest { authority_idx, slot, vrf_output, vrf_proof, ticket_aux: None } + let vrf_signature = slot_claim_vrf_signature(slot, pair); + PreDigest { authority_idx, slot, vrf_signature, ticket_aux: None } } /// Produce a `PreDigest` instance for the given parameters and wrap the result into a `Digest` diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 7fdec9e22371d..3a58dd5a8f5fa 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -22,7 +22,6 @@ serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../vrf" } sp-core = { version = "7.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } @@ -41,7 +40,6 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-consensus-slots/std", - "sp-consensus-vrf/std", "sp-core/std", "sp-inherents/std", "sp-keystore/std", diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 1b5fabc144bf4..3df1ac18fb46e 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,14 +18,14 @@ //! Private implementation details of Sassafras digests. 
use super::{ - AuthorityId, AuthorityIndex, AuthoritySignature, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, Slot, TicketAux, SASSAFRAS_ENGINE_ID, + AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, SassafrasAuthorityWeight, + SassafrasEpochConfiguration, Slot, TicketClaim, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; +use sp_core::sr25519::vrf::VrfSignature; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; @@ -36,12 +36,11 @@ pub struct PreDigest { pub authority_idx: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, - /// Slot VRF output. - pub vrf_output: VRFOutput, - /// Slot VRF proof. - pub vrf_proof: VRFProof, + /// Slot claim VRF signature. + /// TODO DAVXY we can store this Signature as a Seal DigestItem + pub vrf_signature: VrfSignature, /// Ticket auxiliary information for claim check. - pub ticket_aux: Option, + pub ticket_claim: Option, } /// Information about the next epoch. This is broadcast in the first block diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index b3079e01197ce..395f3f2e85a26 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -18,27 +18,24 @@ //! Primitives for Sassafras //! 
TODO-SASS-P2 : write proper docs -#![deny(warnings)] -#![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] +// TODO DAVXY enable warnings +// #![deny(warnings)] +// #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_core::{crypto, U256}; +use sp_core::crypto::KeyTypeId; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; -pub use sp_consensus_vrf::schnorrkel::{ - PublicKey, Randomness, VRFOutput, VRFProof, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, - VRF_PROOF_LENGTH, -}; +pub use sp_core::sr25519::vrf::{VrfOutput, VrfProof, VrfSignature, VrfTranscript}; pub mod digests; pub mod inherents; -pub mod vrf; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; @@ -46,7 +43,16 @@ mod app { } /// Key type for Sassafras protocol. -pub const KEY_TYPE: crypto::KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; +pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + +/// Consensus engine identifier. +pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; + +/// VRF context used for per-slot randomness generation. +pub const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasRandomnessVRFContext"; + +/// VRF output length for per-slot randomness. +pub const RANDOMNESS_LENGTH: usize = 32; /// The index of an authority. pub type AuthorityIndex = u32; @@ -63,12 +69,6 @@ pub type AuthoritySignature = app::Signature; /// the main Sassafras module. If that ever changes, then this must, too. pub type AuthorityId = app::Public; -/// The `ConsensusEngineId` of BABE. -pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; - -/// The length of the public key -pub const PUBLIC_KEY_LENGTH: usize = 32; - /// The weight of an authority. 
// NOTE: we use a unique name for the weight to avoid conflicts with other // `Weight` types, since the metadata isn't able to disambiguate. @@ -81,8 +81,11 @@ pub type SassafrasBlockWeight = u32; /// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). pub type EquivocationProof = sp_consensus_slots::EquivocationProof; +/// Randomness required by some SASSAFRAS operations. +pub type Randomness = [u8; RANDOMNESS_LENGTH]; + /// Configuration data used by the Sassafras consensus engine. -#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] pub struct SassafrasConfiguration { /// The slot duration in milliseconds. pub slot_duration: u64, @@ -104,7 +107,7 @@ impl SassafrasConfiguration { } /// Sassafras epoch information -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug, TypeInfo)] pub struct Epoch { /// The epoch index. pub epoch_idx: u64, @@ -125,41 +128,63 @@ pub struct SassafrasEpochConfiguration { pub attempts_number: u32, } -/// Ticket value. -pub type Ticket = VRFOutput; +/// Ticket identifier. +pub type TicketId = u128; + +/// TODO DAVXY +/// input obtained via `make_vrf_input_transcript` +pub fn make_ticket_value(_in: &VrfTranscript, out: &VrfOutput) -> TicketId { + // TODO DAVXY temporary way to generate id... use io.make_bytes() + let preout = out; + let mut raw: [u8; 16] = [0; 16]; + raw.copy_from_slice(&preout.0 .0[0..16]); + u128::from_le_bytes(raw) +} -/// Ticket proof. -pub type TicketProof = VRFProof; +/// Ticket value. +// TODO: potentially this can be opaque to separate the protocol from the application +#[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketData { + /// Attempt index. + pub attempt_idx: u32, + /// Ed25519 public key which gets erased when claiming the ticket. 
+ pub erased_public: [u8; 32], + /// Ed25519 public key which gets exposed when claiming the ticket. + pub revealed_public: [u8; 32], +} /// Ticket ZK commitment proof. /// TODO-SASS-P3: this is a placeholder. -pub type TicketZkProof = VRFProof; +pub type TicketRingProof = (); /// Ticket envelope used on submission. // TODO-SASS-P3: we are currently using Shnorrkel structures as placeholders. // Should switch to new RVRF primitive soon. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketEnvelope { - /// Ring VRF output. - pub ticket: Ticket, - /// Ring VRF zk proof. - pub zk_proof: TicketZkProof, - // Ticket opaque utility data. - // TODO-SASS-P3: Interpretation of this data is up to the application? Investigate - // Suggested by Jeff: - // - ephemeral_pk: public key used to... - // - revealed_pk: ??? - // - gossip_auth_id: identifier to reach this actor in a separate gossip network - //pub data: Vec, + /// VRF output. + pub data: TicketData, + /// VRF pre-output used to generate the ticket id. + pub vrf_preout: VrfOutput, + // /// Pedersen VRF signature + // pub ped_signature: (), + /// Ring VRF proof. + pub ring_proof: TicketRingProof, } /// Ticket private auxiliary information. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketAux { - /// Attempt number. - pub attempt: u32, - /// Ticket proof used to claim a slot. - pub proof: TicketProof, +pub struct TicketSecret { + /// Attempt index. + pub attempt_idx: u32, + /// Ed25519 used to claim ticket ownership. + pub erased_secret: [u8; 32], +} + +/// Ticket claim information filled by the block author. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketClaim { + pub erased_signature: [u8; 64], } /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: @@ -170,39 +195,74 @@ pub struct TicketAux { /// The parameters should be chosen such that T <= 1. 
/// If `attempts * validators` is zero then we fallback to T = 0 // TODO-SASS-P3: this formula must be double-checked... -pub fn compute_threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> U256 { +pub fn compute_ticket_id_threshold( + redundancy: u32, + slots: u32, + attempts: u32, + validators: u32, +) -> TicketId { let den = attempts as u64 * validators as u64; let num = redundancy as u64 * slots as u64; - U256::max_value() + TicketId::max_value() .checked_div(den.into()) - .unwrap_or(U256::zero()) + .unwrap_or_default() .saturating_mul(num.into()) } -/// Returns true if the given VRF output is lower than the given threshold, false otherwise. -pub fn check_threshold(ticket: &Ticket, threshold: U256) -> bool { - U256::from(ticket.as_bytes()) < threshold -} - /// An opaque type used to represent the key ownership proof at the runtime API boundary. /// The inner value is an encoded representation of the actual key ownership proof which will be /// parameterized when defining the runtime. At the runtime API boundary this type is unknown and /// as such we keep this opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. -#[derive(Decode, Encode, PartialEq)] +#[derive(Decode, Encode, PartialEq, TypeInfo)] pub struct OpaqueKeyOwnershipProof(Vec); -impl OpaqueKeyOwnershipProof { - /// Create a new `OpaqueKeyOwnershipProof` using the given encoded representation. - pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { - OpaqueKeyOwnershipProof(inner) - } +// impl OpaqueKeyOwnershipProof { +// /// Create a new `OpaqueKeyOwnershipProof` using the given encoded representation. +// pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { +// OpaqueKeyOwnershipProof(inner) +// } + +// /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key +// /// ownership proof type. 
+// pub fn decode(self) -> Option { +// Decode::decode(&mut &self.0[..]).ok() +// } +// } + +/// Make per slot randomness VRF input transcript. +/// +/// Input randomness is current epoch randomness. +pub fn make_slot_vrf_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfTranscript { + VrfTranscript::new( + &SASSAFRAS_ENGINE_ID, + &[ + (b"type", b"slot-transcript"), + (b"slot", &slot.to_le_bytes()), + (b"epoch", &epoch.to_le_bytes()), + (b"randomness", randomness), + ], + ) +} - /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key - /// ownership proof type. - pub fn decode(self) -> Option { - Decode::decode(&mut &self.0[..]).ok() - } +/// Make ticket VRF transcript data container. +/// +/// Input randomness is current epoch randomness. +#[cfg(feature = "std")] +pub fn make_ticket_vrf_transcript( + randomness: &Randomness, + attempt: u32, + epoch: u64, +) -> VrfTranscript { + VrfTranscript::new( + &SASSAFRAS_ENGINE_ID, + &[ + (b"type", b"ticket-transcript"), + (b"attempt", &attempt.to_le_bytes()), + (b"epoch", &epoch.to_le_bytes()), + (b"randomness", randomness), + ], + ) } // Runtime API. @@ -213,8 +273,11 @@ sp_api::decl_runtime_apis! { /// This method returns `false` when creation of the extrinsics fails. fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; - /// Get expected ticket value for the given slot. - fn slot_ticket(slot: Slot) -> Option; + /// Get ticket id associated to the given slot. + fn slot_ticket_id(slot: Slot) -> Option; + + /// Get ticket id and data associated to the given slot. + fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketData)>; /// Current epoch information. fn current_epoch() -> Epoch; diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs deleted file mode 100644 index 1c46fe77a6c6e..0000000000000 --- a/primitives/consensus/sassafras/src/vrf.rs +++ /dev/null @@ -1,92 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Primitives related to VRF input and output. - -pub use merlin::Transcript; - -pub use sp_consensus_slots::Slot; -pub use sp_consensus_vrf::schnorrkel::{PublicKey, Randomness, VRFOutput, VRFProof}; -#[cfg(feature = "std")] -use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; - -use crate::SASSAFRAS_ENGINE_ID; - -const TYPE_LABEL: &str = "type"; -const EPOCH_LABEL: &str = "epoch"; -const SLOT_LABEL: &str = "slot"; -const ATTEMPT_LABEL: &str = "slot"; -const RANDOMNESS_LABEL: &str = "randomness"; - -const SLOT_VRF_TYPE_VALUE: &str = "slot-vrf"; -const TICKET_VRF_TYPE_VALUE: &str = "ticket-vrf"; - -/// Make slot VRF transcript. -pub fn make_slot_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(TYPE_LABEL.as_bytes(), SLOT_VRF_TYPE_VALUE.as_bytes()); - transcript.append_u64(SLOT_LABEL.as_bytes(), *slot); - transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); - transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); - transcript -} - -/// Make slot VRF transcript data container. 
-#[cfg(feature = "std")] -pub fn make_slot_transcript_data( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - (TYPE_LABEL, VRFTranscriptValue::Bytes(SLOT_VRF_TYPE_VALUE.as_bytes().to_vec())), - (SLOT_LABEL, VRFTranscriptValue::U64(*slot)), - (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), - (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} - -/// Make ticket VRF transcript. -pub fn make_ticket_transcript(randomness: &Randomness, attempt: u32, epoch: u64) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(TYPE_LABEL.as_bytes(), TICKET_VRF_TYPE_VALUE.as_bytes()); - transcript.append_u64(ATTEMPT_LABEL.as_bytes(), attempt as u64); - transcript.append_u64(EPOCH_LABEL.as_bytes(), epoch); - transcript.append_message(RANDOMNESS_LABEL.as_bytes(), randomness); - transcript -} - -/// Make ticket VRF transcript data container. 
-#[cfg(feature = "std")] -pub fn make_ticket_transcript_data( - randomness: &Randomness, - attempt: u32, - epoch: u64, -) -> VRFTranscriptData { - VRFTranscriptData { - label: &SASSAFRAS_ENGINE_ID, - items: vec![ - (TYPE_LABEL, VRFTranscriptValue::Bytes(TICKET_VRF_TYPE_VALUE.as_bytes().to_vec())), - (ATTEMPT_LABEL, VRFTranscriptValue::U64(attempt as u64)), - (EPOCH_LABEL, VRFTranscriptValue::U64(epoch)), - (RANDOMNESS_LABEL, VRFTranscriptValue::Bytes(randomness.to_vec())), - ], - } -} From 80bb0a49d8f35b9ef09159e38346c3eec07d8a8d Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 8 May 2023 19:36:22 +0200 Subject: [PATCH 25/62] Frame tests overhaul --- Cargo.lock | 8 +- frame/sassafras/Cargo.toml | 2 +- frame/sassafras/src/lib.rs | 43 ++-- frame/sassafras/src/mock.rs | 33 ++- frame/sassafras/src/tests.rs | 381 ++++++++++++++++++++--------------- 5 files changed, 273 insertions(+), 194 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6573f124e255e..3506fd9213c6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3060,12 +3060,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hkdf" version = "0.12.3" @@ -6816,10 +6810,10 @@ dependencies = [ name = "pallet-sassafras" version = "0.3.1-dev" dependencies = [ + "array-bytes", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "log", "pallet-session", "pallet-timestamp", diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 22aee8632c968..41b50759f39cf 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { version = "7.0.0", default-features = false, path = "../../primit sp-std = { version = 
"5.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -hex-literal = "0.3.4" +array-bytes = "4.1" sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index f1e0e131bdcee..6831cfda382de 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -43,7 +43,7 @@ //! To anonymously publish the ticket to the chain a validator sends their tickets //! to a random validator who later puts it on-chain as a transaction. -// #![deny(warnings)] +#![deny(warnings)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] @@ -224,10 +224,10 @@ pub mod pallet { #[pallet::storage] pub type TicketsData = StorageMap<_, Identity, TicketId, TicketData, ValueQuery>; - /// Next epoch tickets temporary accumulator. - /// Special `u32::MAX` key is reserved for partially sorted segment. + /// Next epoch tickets accumulator. + /// Special `u32::MAX` key is reserved for a partially sorted segment. // This bound is set as `MaxTickets` in the unlucky case where we receive one Ticket at a time. - // The max capacity is thus MaxTickets^2. Not much, given that we save TicketIds here. + // The max capacity is thus MaxTickets^2. Not much, given that we save `TicketIds` here. #[pallet::storage] pub type NextTicketsSegments = StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; @@ -301,7 +301,7 @@ pub mod pallet { let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - // TODO-SASS-P3: use make-bytes!!! + // TODO-SASS-P3 DAVXY: use make-bytes!!! // let bytes = .... 
; for the moment we just use the pre-output Self::deposit_randomness(pre_digest.vrf_signature.output.0.as_bytes()); @@ -338,8 +338,6 @@ pub mod pallet { ) -> DispatchResult { ensure_none(origin)?; - let mut metadata = TicketsMeta::::get(); - log::debug!(target: LOG_TARGET, "@@@@@@@@@@ received {} tickets", tickets.len()); // Check tickets score @@ -371,10 +369,14 @@ pub mod pallet { } } - // We just require a unique key to save the tickets ids segment. - metadata.segments_count += 1; - NextTicketsSegments::::insert(metadata.segments_count, segment); - TicketsMeta::::set(metadata); + if !segment.is_empty() { + log::debug!(target: LOG_TARGET, "@@@@@@@@@@ appending segment with {} tickets", segment.len()); + let mut metadata = TicketsMeta::::get(); + NextTicketsSegments::::insert(metadata.segments_count, segment); + metadata.segments_count += 1; + TicketsMeta::::set(metadata); + } + Ok(()) } @@ -812,11 +814,10 @@ impl Pallet { // entries. If all the segments were consumed then the sorted vector is saved as the // next epoch tickets, else it is saved to be used by next calls to this function. fn sort_tickets(max_iter: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { - let mut segments_count = metadata.segments_count; - let max_iter = max_iter.min(segments_count); + let max_iter = max_iter.min(metadata.segments_count); let max_tickets = T::MaxTickets::get() as usize; - // Fetch the partial result. + // Fetch the sorted result (if any). let mut new_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); let mut require_sort = max_iter != 0; @@ -829,37 +830,35 @@ impl Pallet { // Consume at most `max_iter` segments. for _ in 0..max_iter { - let segment = NextTicketsSegments::::take(segments_count); + metadata.segments_count -= 1; + let segment = NextTicketsSegments::::take(metadata.segments_count); + // Merge only elements below the current sorted segment sup. 
segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); if new_segment.len() > max_tickets { require_sort = false; + // Sort and truncnate the result new_segment.sort_unstable(); new_segment[max_tickets..].iter().for_each(|id| TicketsData::::remove(id)); new_segment.truncate(max_tickets); sup = new_segment[max_tickets - 1]; } - - segments_count -= 1; } if require_sort { new_segment.sort_unstable(); } - if segments_count == 0 { + if metadata.segments_count == 0 { // Sort is over, write to next epoch map. - // TODO-SASS-P3: is there a better way to write a map from a vector? new_segment.iter().enumerate().for_each(|(i, id)| { TicketsIds::::insert((epoch_tag, i as u32), id); }); metadata.tickets_count[epoch_tag as usize] = new_segment.len() as u32; } else { - // Keep the partial result for next invocations. + // Keep the partial result for next calls. NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); } - - // metadata.segments_count = segments_count; } /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 51b76cc77cc9c..76a02ba367d04 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -22,8 +22,8 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever}; use frame_support::traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityIndex, AuthorityPair, Slot, TicketData, TicketEnvelope, - VrfSignature, + digests::PreDigest, AuthorityIndex, AuthorityPair, SassafrasEpochConfiguration, Slot, + TicketData, TicketEnvelope, VrfSignature, }; use sp_core::{ crypto::{Pair, VrfSigner}, @@ -106,6 +106,12 @@ frame_support::construct_runtime!( } ); +// Default used under tests. +// The max redundancy factor allows to accept all submitted tickets without worrying +// about the threshold. 
+pub const TEST_EPOCH_CONFIGURATION: SassafrasEpochConfiguration = + SassafrasEpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 32 }; + /// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len).1 @@ -124,7 +130,8 @@ pub fn new_test_ext_with_pairs( let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let config = pallet_sassafras::GenesisConfig { authorities, epoch_config: Default::default() }; + let config = + pallet_sassafras::GenesisConfig { authorities, epoch_config: TEST_EPOCH_CONFIGURATION }; >::assimilate_storage( &config, &mut storage, @@ -190,7 +197,7 @@ pub fn make_pre_digest( pair: &AuthorityPair, ) -> PreDigest { let vrf_signature = slot_claim_vrf_signature(slot, pair); - PreDigest { authority_idx, slot, vrf_signature, ticket_aux: None } + PreDigest { authority_idx, slot, vrf_signature, ticket_claim: None } } /// Produce a `PreDigest` instance for the given parameters and wrap the result into a `Digest` @@ -206,6 +213,24 @@ pub fn make_wrapped_pre_digest( Digest { logs: vec![log] } } +pub fn initialize_block( + number: u64, + slot: Slot, + parent_hash: H256, + pair: &AuthorityPair, +) -> Digest { + let digest = make_wrapped_pre_digest(0, slot, pair); + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + digest +} + +pub fn finalize_block(number: u64) -> Header { + Sassafras::on_finalize(number); + System::finalize() +} + /// Progress the pallet state up to the given block `number` and `slot`. 
pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { Sassafras::on_finalize(System::block_number()); diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 0e2e11c0aee96..61eb29ae8eb7b 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -20,88 +20,91 @@ use crate::*; use mock::*; -use frame_support::traits::{OnFinalize, OnInitialize}; -use hex_literal::hex; use sp_consensus_sassafras::Slot; use sp_runtime::traits::Get; +fn h2b(hex: &str) -> [u8; N] { + array_bytes::hex2array_unchecked(hex) +} + #[test] -fn genesis_values_sanity_check() { +fn genesis_values_assumptions_check() { new_test_ext(4).execute_with(|| { assert_eq!(Sassafras::authorities().len(), 4); - assert_eq!(EpochConfig::::get(), Default::default()); + assert_eq!(EpochConfig::::get(), TEST_EPOCH_CONFIGURATION); }); } #[test] -fn slot_ticket_fetch() { +fn slot_ticket_id_fetch() { let genesis_slot = Slot::from(100); let max_tickets: u32 = ::MaxTickets::get(); assert_eq!(max_tickets, 6); - let curr_tickets: Vec = (0..max_tickets as u8) - .into_iter() - .map(|i| [i; 32].try_into().unwrap()) - .collect(); + // Current epoch tickets + let curr_tickets: Vec = (0..max_tickets).map(|i| i as TicketId).collect(); - let next_tickets: Vec = (0..(max_tickets - 1) as u8) - .into_iter() - .map(|i| [max_tickets as u8 + i; 32].try_into().unwrap()) - .collect(); + let next_tickets: Vec = + (0..max_tickets - 1).map(|i| (i + max_tickets) as TicketId).collect(); new_test_ext(4).execute_with(|| { - curr_tickets.iter().enumerate().for_each(|(i, ticket)| { - Tickets::::insert((0, i as u32), ticket); - }); - next_tickets.iter().enumerate().for_each(|(i, ticket)| { - Tickets::::insert((1, i as u32), ticket); - }); + curr_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); + + next_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); + 
TicketsMeta::::set(TicketsMetadata { tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], segments_count: 0, }); // Before initializing `GenesisSlot` value the pallet always return the first slot - // This is a kind of special case hardcoded policy that should never happen in practice + // This is a kind of special hardcoded case that should never happen in practice // (i.e. the first thing the pallet does is to initialize the genesis slot). - assert_eq!(Sassafras::slot_ticket(0.into()), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 100), Some(curr_tickets[1])); + + assert_eq!(Sassafras::slot_ticket_id(0.into()), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), Some(curr_tickets[1])); // Initialize genesis slot.. GenesisSlot::::set(genesis_slot); - // Try fetch a ticket for a slot before current session. - assert_eq!(Sassafras::slot_ticket(0.into()), None); - - // Current session tickets. 
- assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), Some(curr_tickets[3])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 2), Some(curr_tickets[5])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 3), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 4), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 5), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 6), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 7), Some(curr_tickets[4])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 8), Some(curr_tickets[2])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 9), Some(curr_tickets[0])); - - // Next session tickets. - assert_eq!(Sassafras::slot_ticket(genesis_slot + 10), Some(next_tickets[1])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 11), Some(next_tickets[3])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 12), None); //Some(next_tickets[5])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 13), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 14), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 15), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 16), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 17), Some(next_tickets[4])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 18), Some(next_tickets[2])); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 19), Some(next_tickets[0])); - - // Try fetch tickets for slots beyend next session. - assert_eq!(Sassafras::slot_ticket(genesis_slot + 20), None); - assert_eq!(Sassafras::slot_ticket(genesis_slot + 42), None); + // Try fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + + // Current epoch tickets. 
+ assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); + + // Next epoch tickets (note that only 5 tickets are available) + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); + + // Try fetch tickets for slots beyend next epoch. 
+ assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); }); } @@ -113,9 +116,7 @@ fn on_first_block_after_genesis() { let start_slot = Slot::from(100); let start_block = 1; - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // Post-initialization status @@ -129,8 +130,7 @@ fn on_first_block_after_genesis() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(RandomnessAccumulator::::get(), [0; 32]); - Sassafras::on_finalize(1); - let header = System::finalize(); + let header = finalize_block(start_block); // Post-finalization status @@ -144,7 +144,7 @@ fn on_first_block_after_genesis() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + h2b("5c1e465b22951f401a05154f7f7fe29e18aaa8b9b2a7bda81cbe75c58193f057"), ); // Header data check @@ -172,15 +172,16 @@ fn on_normal_block() { ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; + let end_block = start_block + 1; - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We don't want to trigger an epoch change in this test. 
let epoch_duration: u64 = ::EpochDuration::get(); - assert!(epoch_duration > 2); - let digest = progress_to_block(2, &pairs[0]).unwrap(); + assert!(epoch_duration > end_block); + + // Progress to block 2 + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); // Post-initialization status @@ -194,11 +195,10 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("50f7d623e15560a3681b085d0dc67b12fa16fefe5366987b58e0c16ba412a14a"), + h2b("5c1e465b22951f401a05154f7f7fe29e18aaa8b9b2a7bda81cbe75c58193f057"), ); - Sassafras::on_finalize(2); - let header = System::finalize(); + let header = finalize_block(end_block); // Post-finalization status @@ -212,7 +212,7 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - hex!("ea16f22af4afe5bfb8e3be3e257c3a88ae0c2406a4afc067871b6e5a7ae8756e"), + h2b("abc64d3d643e7d6895e7ea136d9c3507ea1e66f8ccbc0a74b0ea76f39a7a6131"), ); // Header data check @@ -230,13 +230,13 @@ fn produce_epoch_change_digest() { let start_slot = Slot::from(100); let start_block = 1; - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We want to trigger an epoch change in this test. 
let epoch_duration: u64 = ::EpochDuration::get(); - let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); + let end_block = start_block + epoch_duration; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); // Post-initialization status @@ -246,18 +246,17 @@ fn produce_epoch_change_digest() { assert_eq!(Sassafras::epoch_index(), 1); assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); assert_eq!(Sassafras::current_slot_index(), 0); - assert_eq!(Sassafras::randomness(), [0; 32],); + assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), - hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + h2b("83b92b55cc8ccfb55d783a9d327132b0b39a0876e75c6f3968f5252ea338688c"), ); assert_eq!( RandomnessAccumulator::::get(), - hex!("ec9f2acd75e3a901b3a3fad95267a275af1aded3df8bebebb8d14ebd2190ce59"), + h2b("15e5ec5a96e997e7ddb5074790cea20b01978b596fee381c0ae38664c6c4a549"), ); - Sassafras::on_finalize(start_block + epoch_duration); - let header = System::finalize(); + let header = finalize_block(end_block); // Post-finalization status @@ -270,11 +269,11 @@ fn produce_epoch_change_digest() { assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), - hex!("99da0ef0252bb8104737d1db0d80ae46079024c377f5bcecfe6545bd93c38d7b"), + h2b("83b92b55cc8ccfb55d783a9d327132b0b39a0876e75c6f3968f5252ea338688c"), ); assert_eq!( RandomnessAccumulator::::get(), - hex!("d017578d6bad1856315866ce1ef845c2584873fcbc011db7dcb99f1f19baa6f3"), + h2b("5ede4f8481c8392a0d4444e244c0ab63b72e224860752277a8b838497b7f18fa"), ); // Header data check @@ -302,23 +301,22 @@ fn produce_epoch_change_digest_with_config() { let start_slot = Slot::from(100); let start_block = 1; - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + initialize_block(start_block, 
start_slot, Default::default(), &pairs[0]); let config = SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; Sassafras::plan_config_change(RuntimeOrigin::root(), config.clone()).unwrap(); // We want to trigger an epoch change in this test. let epoch_duration: u64 = ::EpochDuration::get(); - let digest = progress_to_block(start_block + epoch_duration, &pairs[0]).unwrap(); + let end_block = start_block + epoch_duration; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); - Sassafras::on_finalize(start_block + epoch_duration); + let header = finalize_block(end_block); // Header data check. // Skip pallet status checks that were already performed by other tests. - let header = System::finalize(); assert_eq!(header.digest.logs.len(), 2); assert_eq!(header.digest.logs[0], digest.logs[0]); // Deposits consensus log on epoch change @@ -335,32 +333,81 @@ fn produce_epoch_change_digest_with_config() { } #[test] -fn segments_incremental_sortition_works() { +fn submit_segments_works() { let (pairs, mut ext) = new_test_ext_with_pairs(1); let pair = &pairs[0]; - let segments_num = 14; + // We're going to generate 14 segments. + let segments_count = 3; ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); - - // Submit authoring tickets in three different batches. - // We can ignore the threshold since we are not passing through the unsigned extrinsic - // validation. 
- let tickets: Vec = - make_tickets(start_slot + 1, segments_num * max_tickets, pair); - let segment_len = tickets.len() / segments_num as usize; - for i in 0..segments_num as usize { + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Tweak the epoch config to discard some of the tickets + let mut config = EpochConfig::::get(); + config.redundancy_factor = 3; + EpochConfig::::set(config); + + // Populate the segments via the `submit_tickets` + let tickets = make_tickets(start_slot + 1, segments_count * max_tickets, pair); + let segment_len = tickets.len() / segments_count as usize; + for i in 0..segments_count as usize { let segment = tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); } + finalize_block(start_block); + + // Check against the expected results given the known inputs + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, segments_count); + assert_eq!(meta.tickets_count, [0, 0]); + let seg = NextTicketsSegments::::get(0); + assert_eq!(seg.len(), 4); + let seg = NextTicketsSegments::::get(1); + assert_eq!(seg.len(), 6); + let seg = NextTicketsSegments::::get(2); + assert_eq!(seg.len(), 4); + }) +} + +#[test] +fn segments_incremental_sortition_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(1); + let pair = &pairs[0]; + let segments_count = 14; + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Manually populate the segments to fool the threshold check + let tickets = make_tickets(start_slot + 1, segments_count * max_tickets, pair); + let segment_len = tickets.len() / segments_count as usize; + + for i in 0..segments_count as usize { + let segment: Vec = tickets[i * segment_len..(i + 1) * segment_len] + .iter() + .enumerate() + .map(|(j, 
ticket)| { + let ticket_id = (i * segment_len + j) as TicketId; + TicketsData::::set(ticket_id, ticket.data.clone()); + ticket_id + }) + .collect(); + let segment = BoundedVec::truncate_from(segment); + NextTicketsSegments::::insert(i as u32, segment); + } + let meta = TicketsMetadata { segments_count, tickets_count: [0, 0] }; + TicketsMeta::::set(meta); + let epoch_duration: u64 = ::EpochDuration::get(); // Proceed to half of the epoch (sortition should not have been started yet) @@ -369,10 +416,10 @@ fn segments_incremental_sortition_works() { // Check that next epoch tickets sortition is not started yet let meta = TicketsMeta::::get(); - assert_eq!(meta.segments_count, segments_num); + assert_eq!(meta.segments_count, segments_count); assert_eq!(meta.tickets_count, [0, 0]); - // Monitor incremental sortition + // Follow incremental sortition block by block progress_to_block(half_epoch_block + 1, pair); let meta = TicketsMeta::::get(); @@ -394,22 +441,23 @@ fn segments_incremental_sortition_works() { assert_eq!(meta.segments_count, 3); assert_eq!(meta.tickets_count, [0, 0]); - Sassafras::on_finalize(half_epoch_block + 4); - let header = System::finalize(); + let header = finalize_block(half_epoch_block + 4); + + // Sort should be finished. + // Check that next epoch tickets count have the correct value (6). + // Bigger values were discarded during sortition. let meta = TicketsMeta::::get(); assert_eq!(meta.segments_count, 0); assert_eq!(meta.tickets_count, [0, 6]); assert_eq!(header.digest.logs.len(), 1); // The next block will be the first produced on the new epoch, - // At this point the tickets were found to be sorted and ready to be used. + // At this point the tickets are found already sorted and ready to be used. 
let slot = Sassafras::current_slot() + 1; - let digest = make_wrapped_pre_digest(0, slot, pair); let number = System::block_number() + 1; - System::initialize(&number, &header.hash(), &digest); - Sassafras::on_initialize(number); - Sassafras::on_finalize(half_epoch_block + 5); - let header = System::finalize(); + initialize_block(number, slot, header.hash(), pair); + let header = finalize_block(number); + // Epoch changes digest is also produced assert_eq!(header.digest.logs.len(), 2); }); } @@ -422,23 +470,23 @@ fn submit_enact_claim_tickets() { let start_slot = Slot::from(100); let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); + let pair = &pairs[0]; - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + initialize_block(start_block, start_slot, Default::default(), pair); // We don't want to trigger an epoch change in this test. let epoch_duration: u64 = ::EpochDuration::get(); assert!(epoch_duration > 2); - let _digest = progress_to_block(2, &pairs[0]).unwrap(); + progress_to_block(2, &pairs[0]).unwrap(); - // Check state before tickets submission - assert!(Tickets::::iter().next().is_none()); + // // Check state before tickets submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, + ); - // Submit authoring tickets in three different batches. - // We can ignore the threshold since we are not passing through the unsigned extrinsic - // validation. - let tickets: Vec = make_tickets(start_slot + 1, 3 * max_tickets, &pairs[0]); + // Submit authoring tickets in three different segments. 
+ let tickets = make_tickets(start_slot + 1, 3 * max_tickets, pair); let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), tickets0).unwrap(); let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); @@ -446,54 +494,69 @@ fn submit_enact_claim_tickets() { let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), tickets2).unwrap(); - let mut expected_tickets: Vec<_> = tickets.into_iter().map(|t| t.ticket).collect(); - expected_tickets.sort(); - expected_tickets.truncate(max_tickets as usize); - // Check state after submit - let meta = TicketsMeta::::get(); - assert!(Tickets::::iter().next().is_none()); - assert_eq!(meta.segments_count, 3); - assert_eq!(meta.tickets_count, [0, 0]); + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 3, tickets_count: [0, 0] }, + ); - // Process up to the last epoch slot (do not enact epoch change) - let _digest = progress_to_block(epoch_duration, &pairs[0]).unwrap(); + // Progress up to the last epoch slot (do not enact epoch change) + progress_to_block(epoch_duration, &pairs[0]).unwrap(); // At this point next tickets should have been sorted - let meta = TicketsMeta::::get(); - assert_eq!(meta.segments_count, 0); - assert_eq!(meta.tickets_count, [0, 6]); + // Check state after submit + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 6] }, + ); + + // Compute and sort the ids (aka ticket scores) + let mut expected_ids: Vec<_> = tickets + .iter() + .map(|t| { + let epoch_idx = Sassafras::epoch_index(); + let randomness = Sassafras::randomness(); + let input = sp_consensus_sassafras::make_ticket_vrf_transcript( + &randomness, + t.data.attempt_idx, + epoch_idx, + ); + sp_consensus_sassafras::make_ticket_value(&input, &t.vrf_preout) + }) + .collect(); + expected_ids.sort(); + expected_ids.truncate(max_tickets as usize); // Check if we can claim next epoch tickets 
in outside-in fashion. let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[1]); - assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[3]); - assert_eq!(Sassafras::slot_ticket(slot + 3).unwrap(), expected_tickets[5]); - assert!(Sassafras::slot_ticket(slot + 4).is_none()); - assert!(Sassafras::slot_ticket(slot + 7).is_none()); - assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[4]); - assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[2]); - assert_eq!(Sassafras::slot_ticket(slot + 10).unwrap(), expected_tickets[0]); - assert!(Sassafras::slot_ticket(slot + 11).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]); + assert!(Sassafras::slot_ticket_id(slot + 4).is_none()); + assert!(Sassafras::slot_ticket_id(slot + 7).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 11).is_none()); - // Enact session change by progressing one more block + // Enact epoch change by progressing one more block - let _digest = progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); + progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); let meta = TicketsMeta::::get(); assert_eq!(meta.segments_count, 0); assert_eq!(meta.tickets_count, [0, 6]); let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket(slot).unwrap(), expected_tickets[1]); - assert_eq!(Sassafras::slot_ticket(slot + 1).unwrap(), expected_tickets[3]); - assert_eq!(Sassafras::slot_ticket(slot + 2).unwrap(), expected_tickets[5]); - assert!(Sassafras::slot_ticket(slot + 
3).is_none()); - assert!(Sassafras::slot_ticket(slot + 6).is_none()); - assert_eq!(Sassafras::slot_ticket(slot + 7).unwrap(), expected_tickets[4]); - assert_eq!(Sassafras::slot_ticket(slot + 8).unwrap(), expected_tickets[2]); - assert_eq!(Sassafras::slot_ticket(slot + 9).unwrap(), expected_tickets[0]); - assert!(Sassafras::slot_ticket(slot + 10).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]); + assert!(Sassafras::slot_ticket_id(slot + 3).is_none()); + assert!(Sassafras::slot_ticket_id(slot + 6).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 10).is_none()); }); } @@ -506,27 +569,25 @@ fn block_allowed_to_skip_epochs() { let start_block = 1; let epoch_duration: u64 = ::EpochDuration::get(); - let digest = make_wrapped_pre_digest(0, start_slot, &pairs[0]); - System::initialize(&start_block, &Default::default(), &digest); - Sassafras::on_initialize(start_block); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - let tickets: Vec = make_tickets(start_slot + 1, 3, &pairs[0]); + let tickets = make_tickets(start_slot + 1, 3, &pairs[0]); Sassafras::submit_tickets( RuntimeOrigin::none(), BoundedVec::truncate_from(tickets.clone()), ) .unwrap(); - // Force enact of next tickets + // Force sortition of next tickets (enactment) by explicitly querying next epoch tickets. assert_eq!(TicketsMeta::::get().segments_count, 1); Sassafras::slot_ticket(start_slot + epoch_duration).unwrap(); assert_eq!(TicketsMeta::::get().segments_count, 0); let next_random = NextRandomness::::get(); - // We want to trigger an skip epoch in this test. 
+ // We want to trigger a skip epoch in this test. let offset = 3 * epoch_duration; - let _digest = go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + go_to_block(start_block + offset, start_slot + offset, &pairs[0]); // Post-initialization status @@ -540,7 +601,7 @@ fn block_allowed_to_skip_epochs() { // Tickets were discarded let meta = TicketsMeta::::get(); assert_eq!(meta, TicketsMetadata::default()); - // We've used the last known next epoch randomness as a fallback + // We used the last known next epoch randomness as a fallback assert_eq!(next_random, Sassafras::randomness()); }); } From 20fe451a54ac1dd7c635454a3470bc6b44abb47b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 10 May 2023 19:58:06 +0200 Subject: [PATCH 26/62] Fix client tests --- Cargo.lock | 1 + client/consensus/sassafras/Cargo.toml | 2 +- client/consensus/sassafras/src/tests.rs | 70 +++++++++++------------ frame/sassafras/src/lib.rs | 6 +- primitives/consensus/sassafras/src/lib.rs | 1 - test-utils/runtime/src/lib.rs | 12 +++- test-utils/runtime/src/system.rs | 1 - 7 files changed, 49 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3506fd9213c6b..ad5b5048b315f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9046,6 +9046,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "thiserror", + "tokio", ] [[package]] diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 964c0101d315c..877c9a55a30d5 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -46,4 +46,4 @@ sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -#tokio = "1.22.0" \ No newline at end of file 
+tokio = "1.22.0" diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index f5ee862eb392b..074d09ed5ae15 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -34,7 +34,7 @@ use sc_network_test::*; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; -use sp_consensus_sassafras::{inherents::InherentDataProvider, vrf::make_slot_transcript_data}; +use sp_consensus_sassafras::{inherents::InherentDataProvider, make_slot_vrf_transcript}; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{Digest, DigestItem}; @@ -42,7 +42,7 @@ use sp_timestamp::Timestamp; use substrate_test_runtime_client::{runtime::Block as TestBlock, Backend as TestBackend}; -// Monomorphization of generic structures for test context. +// Specialization of generic structures for test context. 
type TestHeader = ::Header; @@ -337,21 +337,15 @@ impl TestContext { }); let epoch = self.epoch_data(&parent_hash, parent_number, slot); - let transcript_data = - make_slot_transcript_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); - let signature = self + let transcript = + make_slot_vrf_transcript(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let vrf_signature = self .keystore - .sr25519_vrf_sign(SASSAFRAS, &public, transcript_data) + .sr25519_vrf_sign(SASSAFRAS, &public, &transcript) .unwrap() .unwrap(); - let pre_digest = PreDigest { - slot, - authority_idx: 0, - vrf_output: VRFOutput(signature.output), - vrf_proof: VRFProof(signature.proof), - ticket_aux: None, - }; + let pre_digest = PreDigest { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; let digest = sp_runtime::generic::Digest { logs: vec![DigestItem::sassafras_pre_digest(pre_digest)], }; @@ -436,7 +430,7 @@ fn claim_secondary_slots_works() { { assert_eq!(claim.authority_idx as usize, auth_idx); assert_eq!(claim.slot, Slot::from(slot)); - assert_eq!(claim.ticket_aux, None); + assert_eq!(claim.ticket_claim, None); assert_eq!(auth_id.public(), auth_id2.into()); // Check that this slot has not been assigned before @@ -468,34 +462,38 @@ fn claim_primary_slots_works() { let keystore = create_test_keystore(Sr25519Keyring::Alice); - // Success if we have ticket data and the key in our keystore + // Success if we have ticket aux data and the authority key in our keystore + // ticket-aux: OK , authority-key: OK => SUCCESS let authority_idx = 0u32; - let ticket: Ticket = [0u8; 32].try_into().unwrap(); - let ticket_proof: VRFProof = [0u8; 64].try_into().unwrap(); - let ticket_aux = TicketAux { attempt: 0, proof: ticket_proof }; - epoch.tickets_aux.insert(ticket, (authority_idx, ticket_aux)); + let ticket_id = 123; + let ticket_data = + TicketData { attempt_idx: 0, erased_public: [0; 32], revealed_public: [0; 32] }; + let ticket_secret = TicketSecret { attempt_idx: 
0, erased_secret: [0; 32] }; + epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret.clone())); let (pre_digest, auth_id) = - authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore).unwrap(); + authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data.clone())), &keystore) + .unwrap(); assert_eq!(pre_digest.authority_idx, authority_idx); assert_eq!(auth_id, Sr25519Keyring::Alice.public().into()); - // Fail if we don't have aux data for some ticket + // Fail if we have authority key in our keystore but not ticket aux data + // ticket-aux: KO , authority-key: OK => FAIL - let ticket: Ticket = [1u8; 32].try_into().unwrap(); - let claim = authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore); + let ticket_id = 321; + let claim = + authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data.clone())), &keystore); assert!(claim.is_none()); - // Fail if we don't have the key for the ticket owner in our keystore - // (even though we have associated data, it doesn't matter) + // Fail if we have ticket aux data but not the authority key in out keystore + // ticket-aux: OK , authority-key: KO => FAIL - let authority_idx = 1u32; - let ticket_proof: VRFProof = [0u8; 64].try_into().unwrap(); - let ticket_aux = TicketAux { attempt: 0, proof: ticket_proof }; - epoch.tickets_aux.insert(ticket, (authority_idx, ticket_aux)); - let claim = authorship::claim_slot(0.into(), &epoch, Some(ticket), &keystore); + let authority_idx = 1u32; // we don't have this key + let ticket_id = 666; + epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret)); + let claim = authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data)), &keystore); assert!(claim.is_none()); } @@ -794,12 +792,12 @@ fn verify_block_claimed_via_secondary_method() { let _out_params = env.verify_block(in_params); } -//================================================================================================= -// More complex tests involving 
communication between multiple nodes. -// -// These tests are performed via a specially crafted test network. -// Closer to integration test than unit tests... -//================================================================================================= +// //================================================================================================= +// // More complex tests involving communication between multiple nodes. +// // +// // These tests are performed via a specially crafted test network. +// // Closer to integration test than unit tests... +// //================================================================================================= impl Environment for TestContext { type CreateProposer = future::Ready>; diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 6831cfda382de..f3bd8ac832bfe 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -797,12 +797,12 @@ impl Pallet { } } - /// Returns ticket data associated to the given `slot`. + /// Returns ticket id and data associated to the given `slot`. /// /// Refer to the `slot_ticket_id` documentation for the slot-ticket association /// criteria. - pub fn slot_ticket(slot: Slot) -> Option { - Self::slot_ticket_id(slot).map(|id| TicketsData::::get(id)) + pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketData)> { + Self::slot_ticket_id(slot).map(|id| (id, TicketsData::::get(id))) } // Lexicographically sort the tickets who belongs to the next epoch. diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 395f3f2e85a26..217c2b63f914b 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -248,7 +248,6 @@ pub fn make_slot_vrf_transcript(randomness: &Randomness, slot: Slot, epoch: u64) /// Make ticket VRF transcript data container. /// /// Input randomness is current epoch randomness. 
-#[cfg(feature = "std")] pub fn make_ticket_vrf_transcript( randomness: &Randomness, attempt: u32, diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 8041d7df6cb38..47c0b1cbb26cd 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1019,7 +1019,11 @@ cfg_if! { epoch } - fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option { + >::slot_ticket_id(slot) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketData)> { >::slot_ticket(slot) } @@ -1371,7 +1375,11 @@ cfg_if! { epoch } - fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option { + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option { + >::slot_ticket_id(slot) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketData)> { >::slot_ticket(slot) } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index df010e4e85c06..533c23c64357c 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -303,7 +303,6 @@ fn execute_read(read: u32, panic_at_end: bool) -> ApplyExtrinsicResult { panic!("BYE") } else { Ok(Ok(())) ->>>>>>> master } } From bc05e9f8e49c32c9a722ea03d135a49181d77239 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 11 May 2023 19:25:02 +0200 Subject: [PATCH 27/62] Fix after master merge --- Cargo.lock | 1 + bin/node-sassafras/node/Cargo.toml | 1 + bin/node-sassafras/node/src/service.rs | 38 +++++++++----------------- bin/node-sassafras/runtime/src/lib.rs | 1 + frame/sassafras/src/mock.rs | 6 ++-- test-utils/runtime/src/genesismap.rs | 16 ++++++++--- 6 files changed, 31 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc8db8a9c5b63..93f95edc3f504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5487,6 +5487,7 @@ 
dependencies = [ "sc-consensus-sassafras", "sc-executor", "sc-keystore", + "sc-network", "sc-rpc", "sc-rpc-api", "sc-service", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 0a5eeb17e9ace..fddc3636477df 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -21,6 +21,7 @@ clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-service = { version = "0.10.0-dev", path = "../../../client/service" } sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index aea061760b18a..6a21e5c538df2 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -46,16 +46,10 @@ pub fn new_partial( sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( + sc_consensus_sassafras::SassafrasBlockImport, + sc_consensus_sassafras::SassafrasLink, + sc_consensus_grandpa::LinkHalf, Option, - ( - sc_consensus_sassafras::SassafrasBlockImport< - Block, - FullClient, - FullGrandpaBlockImport, - >, - sc_consensus_grandpa::LinkHalf, - sc_consensus_sassafras::SassafrasLink, - ), ), >, ServiceError, @@ -71,12 +65,7 @@ pub fn new_partial( }) .transpose()?; - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); + let executor = sc_service::new_native_or_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( @@ -140,8 +129,6 @@ pub fn 
new_partial( telemetry.as_ref().map(|x| x.handle()), )?; - let import_setup = (sassafras_block_import, grandpa_link, sassafras_link); - Ok(sc_service::PartialComponents { client, backend, @@ -150,12 +137,12 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - other: (telemetry, import_setup), + other: (sassafras_block_import, sassafras_link, grandpa_link, telemetry), }) } /// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { +pub fn new_full(config: Configuration) -> Result { let sc_service::PartialComponents { client, backend, @@ -164,20 +151,20 @@ pub fn new_full(mut config: Configuration) -> Result keystore_container, select_chain, transaction_pool, - other: (mut telemetry, import_setup), + other: (block_import, sassafras_link, grandpa_link, mut telemetry), } = new_partial(&config)?; - let (block_import, grandpa_link, sassafras_link) = import_setup; + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), &config.chain_spec, ); - config - .network - .extra_sets - .push(sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone())); + net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config( + grandpa_protocol_name.clone(), + )); + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), @@ -187,6 +174,7 @@ pub fn new_full(mut config: Configuration) -> Result let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, + net_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), diff --git a/bin/node-sassafras/runtime/src/lib.rs 
b/bin/node-sassafras/runtime/src/lib.rs index 647e39c6f4920..0f045fe0879c9 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -251,6 +251,7 @@ impl pallet_transaction_payment::Config for Runtime { impl pallet_sudo::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; + type WeightInfo = pallet_sudo::weights::SubstrateWeight; } #[cfg(feature = "use-session-pallet")] diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 76a02ba367d04..d36e25d609424 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -26,7 +26,7 @@ use sp_consensus_sassafras::{ TicketData, TicketEnvelope, VrfSignature, }; use sp_core::{ - crypto::{Pair, VrfSigner}, + crypto::{Pair, VrfSecret}, H256, U256, }; use sp_runtime::{ @@ -157,7 +157,7 @@ fn make_ticket(slot: Slot, attempt: u32, pair: &AuthorityPair) -> TicketEnvelope // TODO DAVXY: NOT REQUIRED ONCE WE HAVE THE NEW API... // (i.e. we just require the preout) - let signature = pair.as_ref().vrf_sign(&transcript); + let signature = pair.as_ref().vrf_sign(&transcript.into()); // TODO DAVXY: use some well known valid test keys... let data = @@ -187,7 +187,7 @@ fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { } let transcript = sp_consensus_sassafras::make_slot_vrf_transcript(&randomness, slot, epoch); - pair.as_ref().vrf_sign(&transcript) + pair.as_ref().vrf_sign(&transcript.into()) } /// Produce a `PreDigest` instance for the given parameters. 
diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 3652e73705789..3c52af76da79d 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -19,7 +19,7 @@ use super::{ currency, substrate_test_pallet, wasm_binary_unwrap, AccountId, AuthorityId, Balance, - GenesisConfig, + GenesisConfig, SassafrasId, }; use codec::Encode; use sc_service::construct_genesis_block; @@ -114,11 +114,19 @@ impl GenesisStorageBuilder { epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), }, sassafras: pallet_sassafras::GenesisConfig { - authorities: self.authorities.clone().into_iter().map(|x| (x, 1)).collect(), - epoch_config: Some(sp_consensus_sassafras::SassafrasEpochConfiguration { + authorities: self + .authorities + .clone() + .into_iter() + .map(|x| { + let inner: sr25519::Public = x.into(); + (SassafrasId::from(inner), 1) + }) + .collect(), + epoch_config: sp_consensus_sassafras::SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32, - }), + }, }, substrate_test: substrate_test_pallet::GenesisConfig { authorities: self.authorities.clone(), From 1906ce985fef42d03dae0c919196512d167cd2f8 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 12 May 2023 12:03:53 +0200 Subject: [PATCH 28/62] Seal should be removed --- client/consensus/sassafras/src/authorship.rs | 3 ++ client/consensus/sassafras/src/tests.rs | 49 +------------------ .../consensus/sassafras/src/verification.rs | 22 +++++---- 3 files changed, 17 insertions(+), 57 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 718bb4ceac190..d5ee9f8ad4818 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -294,6 +294,9 @@ where sc_consensus::BlockImportParams>::Transaction>, ConsensusError, > { + // TODO DAVXY SASS-32: this seal may be revisited. 
+ // We already have a VRF signature, this could be completelly redundant. + // The header.hash() can be added to the VRF signed data. let signature = self .keystore .sign_with( diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 074d09ed5ae15..817db24be3115 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -91,10 +91,7 @@ const SLOT_DURATION: u64 = 1000; struct TestProposer { client: Arc, - link: SassafrasLink, parent_hash: Hash, - parent_number: u64, - parent_slot: Slot, } impl TestProposer { @@ -122,46 +119,11 @@ impl Proposer for TestProposer { let block_builder = self.client.new_block_at(self.parent_hash, inherent_digests, false).unwrap(); - let mut block = match block_builder.build().map_err(|e| e.into()) { + let block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, Err(e) => return future::ready(Err(e)), }; - // Currently the test runtime doesn't invoke each pallet Hooks such as `on_initialize` and - // `on_finalize`. Thus we have to manually figure out if we should add a consensus digest. - - let this_slot = crate::find_pre_digest::(block.header()) - .expect("baked block has valid pre-digest") - .slot; - - let epoch_changes = self.link.epoch_changes.shared_data(); - let epoch = epoch_changes - .epoch_data_for_child_of( - descendent_query(&*self.client), - &self.parent_hash, - self.parent_number, - this_slot, - |slot| Epoch::genesis(&self.link.genesis_config, slot), - ) - .expect("client has data to find epoch") - .expect("can compute epoch for baked block"); - - let first_in_epoch = self.parent_slot < epoch.start_slot; - if first_in_epoch { - // push a `Consensus` digest signalling next change. - // we just reuse the same randomness and authorities as the prior - // epoch. this will break when we add light client support, since - // that will re-check the randomness logic off-chain. 
- let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { - authorities: epoch.config.authorities.clone(), - randomness: epoch.config.randomness, - config: None, - }) - .encode(); - let digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, digest_data); - block.header.digest_mut().push(digest) - } - future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) } } @@ -341,7 +303,7 @@ impl TestContext { make_slot_vrf_transcript(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); let vrf_signature = self .keystore - .sr25519_vrf_sign(SASSAFRAS, &public, &transcript) + .sr25519_vrf_sign(SASSAFRAS, &public, &transcript.into()) .unwrap() .unwrap(); @@ -805,16 +767,9 @@ impl Environment for TestContext { type Error = TestError; fn init(&mut self, parent_header: &TestHeader) -> Self::CreateProposer { - let parent_slot = crate::find_pre_digest::(parent_header) - .expect("parent header has a pre-digest") - .slot; - future::ready(Ok(TestProposer { client: self.client.clone(), - link: self.link.clone(), parent_hash: parent_header.hash(), - parent_number: *parent_header.number(), - parent_slot, })) } } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index f654bf55ee74c..e6de2d4c3cf35 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -45,8 +45,8 @@ struct VerificationParams<'a, B: 'a + BlockT> { struct VerifiedHeaderInfo { /// Authority index. authority_id: AuthorityId, - // /// Seal found within the header. - // seal: DigestItem, + /// Seal found within the header. + seal: DigestItem, } /// Check a header has been signed by the right key. 
If the slot is too far in @@ -61,11 +61,18 @@ struct VerifiedHeaderInfo { fn check_header( params: VerificationParams, ) -> Result, Error> { - let VerificationParams { header, pre_digest, slot_now, epoch, origin, maybe_ticket } = params; + let VerificationParams { mut header, pre_digest, slot_now, epoch, origin, maybe_ticket } = + params; let config = &epoch.config; + let seal = header + .digest_mut() + .pop() + .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + // Check that the slot is not in the future, with some drift being allowed. if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { + header.digest_mut().push(seal); return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } @@ -90,11 +97,6 @@ fn check_header( return Err(sassafras_err(Error::VrfVerificationFailed)) } - // let seal = header - // .digest_mut() - // .pop() - // .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; - // let signature = seal // .as_sassafras_seal() // .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; @@ -132,7 +134,7 @@ fn check_header( }, } - let info = VerifiedHeaderInfo { authority_id }; + let info = VerifiedHeaderInfo { authority_id, seal }; Ok(CheckedHeader::Checked(header, info)) } @@ -440,7 +442,7 @@ where block.header = pre_header; block.post_hash = Some(hash); // TODO DAVXY: seal required??? 
- // block.post_digests.push(verified_info.seal); + block.post_digests.push(verified_info.seal); block.insert_intermediate( INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }, From 93542043680f9c73bbf65ffa120d953ae4f47575 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 12 May 2023 15:10:13 +0200 Subject: [PATCH 29/62] Isolate ticketing structs and functions in separate module --- client/consensus/sassafras/src/authorship.rs | 39 ++--- client/consensus/sassafras/src/lib.rs | 14 +- client/consensus/sassafras/src/tests.rs | 14 +- .../consensus/sassafras/src/verification.rs | 8 +- frame/sassafras/src/lib.rs | 14 +- frame/sassafras/src/mock.rs | 14 +- frame/sassafras/src/tests.rs | 33 +++-- primitives/consensus/sassafras/src/digests.rs | 4 +- primitives/consensus/sassafras/src/lib.rs | 136 +---------------- primitives/consensus/sassafras/src/ticket.rs | 137 ++++++++++++++++++ 10 files changed, 206 insertions(+), 207 deletions(-) create mode 100644 primitives/consensus/sassafras/src/ticket.rs diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index d5ee9f8ad4818..ee8fc50baeb92 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -20,10 +20,9 @@ use super::*; -use sp_application_crypto::Wraps; use sp_consensus_sassafras::{ - digests::PreDigest, make_slot_vrf_transcript, make_ticket_vrf_transcript, AuthorityId, Slot, - TicketClaim, TicketData, TicketEnvelope, TicketId, + digests::PreDigest, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketClaim, TicketData, + TicketEnvelope, TicketId, }; use sp_core::{twox_64, ByteArray}; @@ -77,9 +76,9 @@ pub(crate) fn claim_slot( let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; - let transcript = make_slot_vrf_transcript(&config.randomness, slot, epoch.epoch_idx); + let vrf_input = slot_claim_vrf_input(&config.randomness, slot, epoch.epoch_idx); let 
vrf_signature = keystore - .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &transcript.into()) + .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_input.into()) .ok() .flatten()?; @@ -97,7 +96,7 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec Vec(b"context", &transcript, &signature.output) - .map(|bytes| u128::from_le_bytes(bytes)) - .ok()?; + let vrf_preout = keystore + .sr25519_vrf_output(AuthorityId::ID, authority_id.as_ref(), &vrf_input) + .ok()??; + let ticket_id = ticket_id(&vrf_input, &vrf_preout); if ticket_id >= threshold { return None } @@ -141,12 +127,11 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec(header: &B::Header) -> Result> if header.number().is_zero() { // Genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code - use sp_consensus_sassafras::VrfTranscript; + use sp_consensus_sassafras::VrfInput; use sp_core::crypto::VrfSecret; let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); - let transcript = VrfTranscript::new(b"", &[]); - let vrf_signature = pair.as_ref().vrf_sign(&transcript.into()); + let input = VrfInput::new(b"", &[]); + let vrf_signature = pair.as_ref().vrf_sign(&input.into()); return Ok(PreDigest { authority_idx: 0, slot: 0.into(), ticket_claim: None, vrf_signature }) } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 817db24be3115..f4e93798a1660 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -18,10 +18,10 @@ //! Sassafras client tests -// TODO-SASS-P2 -// Missing interesting tests: +// TODO-SASS-P3 +// Missing tests // - verify block claimed via primary method - +// - tests using tickets to claim slots. 
Current tests just don't register any on-chain ticket use super::*; use futures::executor::block_on; @@ -34,7 +34,7 @@ use sc_network_test::*; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; -use sp_consensus_sassafras::{inherents::InherentDataProvider, make_slot_vrf_transcript}; +use sp_consensus_sassafras::{inherents::InherentDataProvider, slot_claim_vrf_input}; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{Digest, DigestItem}; @@ -299,11 +299,11 @@ impl TestContext { }); let epoch = self.epoch_data(&parent_hash, parent_number, slot); - let transcript = - make_slot_vrf_transcript(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let vrf_input = + slot_claim_vrf_input(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); let vrf_signature = self .keystore - .sr25519_vrf_sign(SASSAFRAS, &public, &transcript.into()) + .sr25519_vrf_sign(SASSAFRAS, &public, &vrf_input.into()) .unwrap() .unwrap(); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index e6de2d4c3cf35..f83a4223ca05c 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -85,14 +85,10 @@ fn check_header( // Check slot-vrf proof - // TODO DAVXY: probably there is not need to also add an explicit `Seal` - // it would be just redundant and we can just push the block header hash within - // the slot-vrf-transcript - - let transcript = make_slot_vrf_transcript(&config.randomness, pre_digest.slot, epoch.epoch_idx); + let vrf_input = slot_claim_vrf_input(&config.randomness, pre_digest.slot, epoch.epoch_idx); if !authority_id .as_inner_ref() - .vrf_verify(&transcript.into(), &pre_digest.vrf_signature) + .vrf_verify(&vrf_input.into(), &pre_digest.vrf_signature) { return
Err(sassafras_err(Error::VrfVerificationFailed)) } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index f3bd8ac832bfe..34119821a3240 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -345,7 +345,7 @@ pub mod pallet { let epoch_config = EpochConfig::::get(); // Current slot should be less than half of epoch duration. let epoch_duration = T::EpochDuration::get(); - let threshold = sp_consensus_sassafras::compute_ticket_id_threshold( + let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( epoch_config.redundancy_factor, epoch_duration as u32, epoch_config.attempts_number, @@ -357,15 +357,17 @@ pub mod pallet { let mut segment = BoundedVec::with_max_capacity(); for ticket in tickets.iter() { - let input = sp_consensus_sassafras::make_ticket_vrf_transcript( + let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( &randomness, ticket.data.attempt_idx, epoch_idx, ); - let id = sp_consensus_sassafras::make_ticket_value(&input, &ticket.vrf_preout); - if id < threshold { - TicketsData::::set(id, ticket.data.clone()); - segment.try_push(id).expect("has same length as bounded input vector; qed"); + let ticket_id = sp_consensus_sassafras::ticket_id(&vrf_input, &ticket.vrf_preout); + if ticket_id < ticket_threshold { + TicketsData::::set(ticket_id, ticket.data.clone()); + segment + .try_push(ticket_id) + .expect("has same length as bounded input vector; qed"); } } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index d36e25d609424..337a8d346cef2 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -152,17 +152,13 @@ fn make_ticket(slot: Slot, attempt: u32, pair: &AuthorityPair) -> TicketEnvelope randomness = crate::NextRandomness::::get(); } - let transcript = - sp_consensus_sassafras::make_ticket_vrf_transcript(&randomness, attempt, epoch); - - // TODO DAVXY: NOT REQUIRED ONCE WE HAVE THE NEW API... - // (i.e. 
we just require the preout) - let signature = pair.as_ref().vrf_sign(&transcript.into()); + let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt, epoch); + let vrf_preout = pair.as_ref().vrf_output(&vrf_input.into()); // TODO DAVXY: use some well known valid test keys... let data = TicketData { attempt_idx: attempt, erased_public: [0; 32], revealed_public: [0; 32] }; - TicketEnvelope { data, vrf_preout: signature.output, ring_proof: () } + TicketEnvelope { data, vrf_preout, ring_proof: () } } /// Construct at most `attempts` tickets for the given `slot`. @@ -186,8 +182,8 @@ fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { randomness = crate::NextRandomness::::get(); } - let transcript = sp_consensus_sassafras::make_slot_vrf_transcript(&randomness, slot, epoch); - pair.as_ref().vrf_sign(&transcript.into()) + let vrf_input = sp_consensus_sassafras::slot_claim_vrf_input(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&vrf_input.into()) } /// Produce a `PreDigest` instance for the given parameters. 
diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 61eb29ae8eb7b..6bf8f37bcdfaa 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -27,6 +27,11 @@ fn h2b(hex: &str) -> [u8; N] { array_bytes::hex2array_unchecked(hex) } +#[allow(unused)] +fn b2h(bytes: [u8; N]) -> String { + array_bytes::bytes2hex("", &bytes) +} + #[test] fn genesis_values_assumptions_check() { new_test_ext(4).execute_with(|| { @@ -144,7 +149,7 @@ fn on_first_block_after_genesis() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - h2b("5c1e465b22951f401a05154f7f7fe29e18aaa8b9b2a7bda81cbe75c58193f057"), + h2b("ad57850fef75c0d256889233a5b1e6994af8b994fa6fb17759ff3906307f675d"), ); // Header data check @@ -195,7 +200,7 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - h2b("5c1e465b22951f401a05154f7f7fe29e18aaa8b9b2a7bda81cbe75c58193f057"), + h2b("ad57850fef75c0d256889233a5b1e6994af8b994fa6fb17759ff3906307f675d"), ); let header = finalize_block(end_block); @@ -212,7 +217,7 @@ fn on_normal_block() { assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!( RandomnessAccumulator::::get(), - h2b("abc64d3d643e7d6895e7ea136d9c3507ea1e66f8ccbc0a74b0ea76f39a7a6131"), + h2b("0bc8cce9f44a6dd90d9abd4486dfc36023a81839fac93d035ff01ef5c7a62ba8"), ); // Header data check @@ -249,11 +254,11 @@ fn produce_epoch_change_digest() { assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), - h2b("83b92b55cc8ccfb55d783a9d327132b0b39a0876e75c6f3968f5252ea338688c"), + h2b("72801624ceaf56c6d07a07e683643d92e91eadd09e06cb4cbe0ffe1edf6e94a1"), ); assert_eq!( RandomnessAccumulator::::get(), - h2b("15e5ec5a96e997e7ddb5074790cea20b01978b596fee381c0ae38664c6c4a549"), + h2b("eb9f571fa1e2f428b81ddb33d428051cb8793227934ed50469d4ad2a84997820"), ); let header = finalize_block(end_block); @@ -269,11 +274,11 @@ fn produce_epoch_change_digest() { 
assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!( NextRandomness::::get(), - h2b("83b92b55cc8ccfb55d783a9d327132b0b39a0876e75c6f3968f5252ea338688c"), + h2b("72801624ceaf56c6d07a07e683643d92e91eadd09e06cb4cbe0ffe1edf6e94a1"), ); assert_eq!( RandomnessAccumulator::::get(), - h2b("5ede4f8481c8392a0d4444e244c0ab63b72e224860752277a8b838497b7f18fa"), + h2b("d4b9d766b937902735d6423b10f3783bb384d738dd2b8d61031de406301fff8e"), ); // Header data check @@ -348,7 +353,7 @@ fn submit_segments_works() { // Tweak the epoch config to discard some of the tickets let mut config = EpochConfig::::get(); - config.redundancy_factor = 3; + config.redundancy_factor = 1; EpochConfig::::set(config); // Populate the segments via the `submit_tickets` @@ -367,11 +372,11 @@ fn submit_segments_works() { assert_eq!(meta.segments_count, segments_count); assert_eq!(meta.tickets_count, [0, 0]); let seg = NextTicketsSegments::::get(0); - assert_eq!(seg.len(), 4); + assert_eq!(seg.len(), 3); let seg = NextTicketsSegments::::get(1); - assert_eq!(seg.len(), 6); + assert_eq!(seg.len(), 1); let seg = NextTicketsSegments::::get(2); - assert_eq!(seg.len(), 4); + assert_eq!(seg.len(), 2); }) } @@ -510,18 +515,18 @@ fn submit_enact_claim_tickets() { TicketsMetadata { segments_count: 0, tickets_count: [0, 6] }, ); - // Compute and sort the ids (aka ticket scores) + // Compute and sort the tickets ids (aka tickets scores) let mut expected_ids: Vec<_> = tickets .iter() .map(|t| { let epoch_idx = Sassafras::epoch_index(); let randomness = Sassafras::randomness(); - let input = sp_consensus_sassafras::make_ticket_vrf_transcript( + let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( &randomness, t.data.attempt_idx, epoch_idx, ); - sp_consensus_sassafras::make_ticket_value(&input, &t.vrf_preout) + sp_consensus_sassafras::ticket_id(&vrf_input, &t.vrf_preout) }) .collect(); expected_ids.sort(); diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs 
index 3df1ac18fb46e..d44d227b7ed9a 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Private implementation details of Sassafras digests. use super::{ - AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, SassafrasAuthorityWeight, - SassafrasEpochConfiguration, Slot, TicketClaim, SASSAFRAS_ENGINE_ID, + ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, + SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 217c2b63f914b..6838f4d175762 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -32,10 +32,16 @@ use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; -pub use sp_core::sr25519::vrf::{VrfOutput, VrfProof, VrfSignature, VrfTranscript}; +pub use sp_core::sr25519::vrf::{VrfInput, VrfOutput, VrfProof, VrfSignData, VrfSignature}; pub mod digests; pub mod inherents; +pub mod ticket; + +pub use ticket::{ + slot_claim_vrf_input, ticket_id, ticket_id_threshold, ticket_id_vrf_input, TicketClaim, + TicketData, TicketEnvelope, TicketId, TicketSecret, +}; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; @@ -128,87 +134,6 @@ pub struct SassafrasEpochConfiguration { pub attempts_number: u32, } -/// Ticket identifier. -pub type TicketId = u128; - -/// TODO DAVXY -/// input obtained via `make_vrf_input_transcript` -pub fn make_ticket_value(_in: &VrfTranscript, out: &VrfOutput) -> TicketId { - // TODO DAVXY temporary way to generate id... use io.make_bytes() - let preout = out; - let mut raw: [u8; 16] = [0; 16]; - raw.copy_from_slice(&preout.0 .0[0..16]); - u128::from_le_bytes(raw) -} - -/// Ticket value. 
-// TODO: potentially this can be opaque to separate the protocol from the application -#[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketData { - /// Attempt index. - pub attempt_idx: u32, - /// Ed25519 public key which gets erased when claiming the ticket. - pub erased_public: [u8; 32], - /// Ed25519 public key which gets exposed when claiming the ticket. - pub revealed_public: [u8; 32], -} - -/// Ticket ZK commitment proof. -/// TODO-SASS-P3: this is a placeholder. -pub type TicketRingProof = (); - -/// Ticket envelope used on submission. -// TODO-SASS-P3: we are currently using Shnorrkel structures as placeholders. -// Should switch to new RVRF primitive soon. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketEnvelope { - /// VRF output. - pub data: TicketData, - /// VRF pre-output used to generate the ticket id. - pub vrf_preout: VrfOutput, - // /// Pedersen VRF signature - // pub ped_signature: (), - /// Ring VRF proof. - pub ring_proof: TicketRingProof, -} - -/// Ticket private auxiliary information. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketSecret { - /// Attempt index. - pub attempt_idx: u32, - /// Ed25519 used to claim ticket ownership. - pub erased_secret: [u8; 32], -} - -/// Ticket claim information filled by the block author. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketClaim { - pub erased_signature: [u8; 64], -} - -/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: -/// - x: redundancy factor; -/// - s: number of slots in epoch; -/// - a: max number of attempts; -/// - v: number of validator in epoch. -/// The parameters should be chosen such that T <= 1. -/// If `attempts * validators` is zero then we fallback to T = 0 -// TODO-SASS-P3: this formula must be double-checked... 
-pub fn compute_ticket_id_threshold( - redundancy: u32, - slots: u32, - attempts: u32, - validators: u32, -) -> TicketId { - let den = attempts as u64 * validators as u64; - let num = redundancy as u64 * slots as u64; - TicketId::max_value() - .checked_div(den.into()) - .unwrap_or_default() - .saturating_mul(num.into()) -} - /// An opaque type used to represent the key ownership proof at the runtime API boundary. /// The inner value is an encoded representation of the actual key ownership proof which will be /// parameterized when defining the runtime. At the runtime API boundary this type is unknown and @@ -217,53 +142,6 @@ pub fn compute_ticket_id_threshold( #[derive(Decode, Encode, PartialEq, TypeInfo)] pub struct OpaqueKeyOwnershipProof(Vec); -// impl OpaqueKeyOwnershipProof { -// /// Create a new `OpaqueKeyOwnershipProof` using the given encoded representation. -// pub fn new(inner: Vec) -> OpaqueKeyOwnershipProof { -// OpaqueKeyOwnershipProof(inner) -// } - -// /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key -// /// ownership proof type. -// pub fn decode(self) -> Option { -// Decode::decode(&mut &self.0[..]).ok() -// } -// } - -/// Make per slot randomness VRF input transcript. -/// -/// Input randomness is current epoch randomness. -pub fn make_slot_vrf_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfTranscript { - VrfTranscript::new( - &SASSAFRAS_ENGINE_ID, - &[ - (b"type", b"slot-transcript"), - (b"slot", &slot.to_le_bytes()), - (b"epoch", &epoch.to_le_bytes()), - (b"randomness", randomness), - ], - ) -} - -/// Make ticket VRF transcript data container. -/// -/// Input randomness is current epoch randomness. 
-pub fn make_ticket_vrf_transcript( - randomness: &Randomness, - attempt: u32, - epoch: u64, -) -> VrfTranscript { - VrfTranscript::new( - &SASSAFRAS_ENGINE_ID, - &[ - (b"type", b"ticket-transcript"), - (b"attempt", &attempt.to_le_bytes()), - (b"epoch", &epoch.to_le_bytes()), - (b"randomness", randomness), - ], - ) -} - // Runtime API. sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs new file mode 100644 index 0000000000000..b81b0ec2dcfd7 --- /dev/null +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -0,0 +1,137 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives related to tickets. + +use super::{Randomness, SASSAFRAS_ENGINE_ID}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_consensus_slots::Slot; +use sp_core::sr25519::vrf::{VrfInput, VrfOutput}; + +/// Ticket identifier. +/// +/// Within the algorithm this is also used as a ticket score applied to bound +/// the ticket to a epoch's slot. +pub type TicketId = u128; + +/// Ticket data persisted on-chain. +#[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketData { + /// Attempt index. 
+ pub attempt_idx: u32, + /// Ed25519 public key which gets erased when claiming the ticket. + pub erased_public: [u8; 32], + /// Ed25519 public key which gets exposed when claiming the ticket. + pub revealed_public: [u8; 32], +} + +/// Ticket ring proof. +/// TODO-SASS-P3: this is a placeholder. +pub type TicketRingProof = (); + +/// Ticket envelope used during submission. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketEnvelope { + /// VRF output. + pub data: TicketData, + /// VRF pre-output used to generate the ticket id. + pub vrf_preout: VrfOutput, + // /// Pedersen VRF signature + // pub ped_signature: (), + /// Ring VRF proof. + pub ring_proof: TicketRingProof, +} + +/// Ticket auxiliary information used to claim the ticket ownership. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketSecret { + /// Attempt index. + pub attempt_idx: u32, + /// Ed25519 used to claim ticket ownership. + pub erased_secret: [u8; 32], +} + +/// Ticket claim information filled by the block author. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketClaim { + pub erased_signature: [u8; 64], +} + +/// VRF input to claim slot ownership during block production. +/// +/// Input randomness is current epoch randomness. +pub fn slot_claim_vrf_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { + VrfInput::new( + &SASSAFRAS_ENGINE_ID, + &[ + (b"type", b"ticket-claim-transcript"), + (b"slot", &slot.to_le_bytes()), + (b"epoch", &epoch.to_le_bytes()), + (b"randomness", randomness), + ], + ) +} + +/// VRF input to generate the ticket id. +/// +/// Input randomness is current epoch randomness.
+pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { + VrfInput::new( + &SASSAFRAS_ENGINE_ID, + &[ + (b"type", b"ticket-id-transcript"), + (b"attempt", &attempt.to_le_bytes()), + (b"epoch", &epoch.to_le_bytes()), + (b"randomness", randomness), + ], + ) +} + +/// Get ticket-id for a given vrf input and output. +/// +/// Input generally obtained via `ticket_id_vrf_input`. +/// Output can be obtained directly using the vrf secret key or from the signature. +// TODO DAVXY temporary way to generate id... use io.make_bytes() +pub fn ticket_id(_in: &VrfInput, out: &VrfOutput) -> TicketId { + let preout = out; + let mut raw: [u8; 16] = [0; 16]; + raw.copy_from_slice(&preout.0 .0[0..16]); + u128::from_le_bytes(raw) +} + +/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: +/// - x: redundancy factor; +/// - s: number of slots in epoch; +/// - a: max number of attempts; +/// - v: number of validator in epoch. +/// The parameters should be chosen such that T <= 1. +/// If `attempts * validators` is zero then we fallback to T = 0 +// TODO-SASS-P3: this formula must be double-checked... 
+pub fn ticket_id_threshold( + redundancy: u32, + slots: u32, + attempts: u32, + validators: u32, +) -> TicketId { + let den = attempts as u64 * validators as u64; + let num = redundancy as u64 * slots as u64; + TicketId::max_value() + .checked_div(den.into()) + .unwrap_or_default() + .saturating_mul(num.into()) +} From 8aa8eaff3d41187ebeffa710a8fb50870fcc5bb5 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 12 May 2023 16:38:58 +0200 Subject: [PATCH 30/62] Use 'make_bytes' to generate randomness and ticket-id --- frame/sassafras/src/lib.rs | 32 +++++++++++++++++--- frame/sassafras/src/tests.rs | 30 ++++++++++-------- primitives/consensus/sassafras/src/ticket.rs | 16 ++++++---- 3 files changed, 56 insertions(+), 22 deletions(-) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 34119821a3240..3df4095335e3e 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -56,7 +56,7 @@ use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, Slot, TicketData, TicketEnvelope, - TicketId, SASSAFRAS_ENGINE_ID, + TicketId, RANDOMNESS_LENGTH, RANDOMNESS_VRF_CONTEXT, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -301,9 +301,33 @@ pub mod pallet { let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - // TODO-SASS-P3 DAVXY: use make-bytes!!! - // let bytes = .... 
; for the moment we just use the pre-output - Self::deposit_randomness(pre_digest.vrf_signature.output.0.as_bytes()); + // TODO DAVXY P32: probably with the new vrf we don't need the authority id + // `inout` is sufficent + let authority_idx = pre_digest.authority_idx; + let authorities = Authorities::::get(); + let authority_id = authorities + .get(authority_idx as usize) + .expect("Authority should be valid at this point; qed"); + + // TODO DAVXY: check if is a disabled validator + + let vrf_input = sp_consensus_sassafras::slot_claim_vrf_input( + &Self::randomness(), + CurrentSlot::::get(), + EpochIndex::::get(), + ); + + let vrf_preout = &pre_digest.vrf_signature.output; + + let randomness = vrf_preout + .make_bytes::( + RANDOMNESS_VRF_CONTEXT, + &vrf_input, + authority_id.0.as_ref(), + ) + .expect("Can't fail? TODO DAVXY"); + + Self::deposit_randomness(&randomness); // If we are in the epoch's second half, we start sorting the next epoch tickets. let epoch_duration = T::EpochDuration::get(); diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 6bf8f37bcdfaa..dbb0d3af87cb9 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -27,7 +27,6 @@ fn h2b(hex: &str) -> [u8; N] { array_bytes::hex2array_unchecked(hex) } -#[allow(unused)] fn b2h(bytes: [u8; N]) -> String { array_bytes::bytes2hex("", &bytes) } @@ -147,9 +146,10 @@ fn on_first_block_after_genesis() { assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("ad57850fef75c0d256889233a5b1e6994af8b994fa6fb17759ff3906307f675d"), + h2b("c3bcc82b9636bf12a9ba858ea6855b0b5a7a57803370e57cd87223f9d8d1a896"), ); // Header data check @@ -198,9 +198,10 @@ fn on_normal_block() { assert_eq!(Sassafras::current_slot_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); 
assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("ad57850fef75c0d256889233a5b1e6994af8b994fa6fb17759ff3906307f675d"), + h2b("c3bcc82b9636bf12a9ba858ea6855b0b5a7a57803370e57cd87223f9d8d1a896"), ); let header = finalize_block(end_block); @@ -215,9 +216,10 @@ fn on_normal_block() { assert_eq!(Sassafras::current_slot_index(), 1); assert_eq!(Sassafras::randomness(), [0; 32]); assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("0bc8cce9f44a6dd90d9abd4486dfc36023a81839fac93d035ff01ef5c7a62ba8"), + h2b("a44c15061d80d1f1b58abb3e002b9bd2d7135b3c8bef95a3af2ae5079a901135"), ); // Header data check @@ -252,13 +254,15 @@ fn produce_epoch_change_digest() { assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); + println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("72801624ceaf56c6d07a07e683643d92e91eadd09e06cb4cbe0ffe1edf6e94a1"), + h2b("fec42ab12d7497cc8863b078774560790a5f1ee38d2b3a6b7448c4cc318c6e24"), ); + println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("eb9f571fa1e2f428b81ddb33d428051cb8793227934ed50469d4ad2a84997820"), + h2b("ba92c7ea134d29bd4c663e9a5811c0c76972606acbfdad354ab3cc9d400f756c"), ); let header = finalize_block(end_block); @@ -272,13 +276,15 @@ fn produce_epoch_change_digest() { assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); assert_eq!(Sassafras::current_slot_index(), 0); assert_eq!(Sassafras::randomness(), [0; 32]); + println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("72801624ceaf56c6d07a07e683643d92e91eadd09e06cb4cbe0ffe1edf6e94a1"), + 
h2b("fec42ab12d7497cc8863b078774560790a5f1ee38d2b3a6b7448c4cc318c6e24"), ); + println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("d4b9d766b937902735d6423b10f3783bb384d738dd2b8d61031de406301fff8e"), + h2b("cea876f919ae1f6cdc8a93e91199d75bd162fb0b930df7168a66cdafc3ddd23c"), ); // Header data check @@ -353,7 +359,7 @@ fn submit_segments_works() { // Tweak the epoch config to discard some of the tickets let mut config = EpochConfig::::get(); - config.redundancy_factor = 1; + config.redundancy_factor = 2; EpochConfig::::set(config); // Populate the segments via the `submit_tickets` @@ -372,11 +378,11 @@ fn submit_segments_works() { assert_eq!(meta.segments_count, segments_count); assert_eq!(meta.tickets_count, [0, 0]); let seg = NextTicketsSegments::::get(0); - assert_eq!(seg.len(), 3); + assert_eq!(seg.len(), 5); let seg = NextTicketsSegments::::get(1); - assert_eq!(seg.len(), 1); + assert_eq!(seg.len(), 6); let seg = NextTicketsSegments::::get(2); - assert_eq!(seg.len(), 2); + assert_eq!(seg.len(), 4); }) } diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index b81b0ec2dcfd7..9f37198314ce5 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -23,6 +23,9 @@ use scale_info::TypeInfo; use sp_consensus_slots::Slot; use sp_core::sr25519::vrf::{VrfInput, VrfOutput}; +/// VRF context used for ticket-id generation. +const TICKET_ID_VRF_CONTEXT: &[u8] = b"SassafrasTicketIdVRFContext"; + /// Ticket identifier. /// /// Within the algorithm this is also used as a ticket score applied to bound @@ -106,12 +109,13 @@ pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> /// /// Input generally obtained via `ticket_id_vrf_input`. /// Output can be obtained directly using the vrf secret key or from the signature. -// TODO DAVXY temporary way to generate id... 
use io.make_bytes() -pub fn ticket_id(_in: &VrfInput, out: &VrfOutput) -> TicketId { - let preout = out; - let mut raw: [u8; 16] = [0; 16]; - raw.copy_from_slice(&preout.0 .0[0..16]); - u128::from_le_bytes(raw) +// TODO DAVXY: with new VRF authority-id is not necessary +pub fn ticket_id(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> TicketId { + let public = sp_core::sr25519::Public::from_raw([0; 32]); + vrf_output + .make_bytes::<16>(TICKET_ID_VRF_CONTEXT, vrf_input, &public) + .map(|bytes| u128::from_le_bytes(bytes)) + .unwrap_or(u128::MAX) } /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: From 5e7d6c5e7742bedfa630efa5e136f975eb5f08c0 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 18 May 2023 18:54:47 +0200 Subject: [PATCH 31/62] Sassafras Protocol v0.3.2 (#14139) - ticket structure, production and claiming close to the final form - experimental integration of `bandersnatch-vrf` - ticket ownership claiming via ed25519 challenge --- Cargo.lock | 356 ++++++--- Cargo.toml | 3 + bin/node-sassafras/node/Cargo.toml | 8 +- bin/node-sassafras/node/src/chain_spec.rs | 2 +- bin/node-sassafras/runtime/Cargo.toml | 6 +- client/consensus/sassafras/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 61 +- client/consensus/sassafras/src/lib.rs | 13 +- client/consensus/sassafras/src/tests.rs | 83 ++- .../consensus/sassafras/src/verification.rs | 48 +- client/keystore/src/local.rs | 47 +- frame/sassafras/Cargo.toml | 4 +- frame/sassafras/src/lib.rs | 45 +- frame/sassafras/src/mock.rs | 4 +- frame/sassafras/src/tests.rs | 24 +- .../application-crypto/src/bandersnatch.rs | 57 ++ primitives/application-crypto/src/lib.rs | 8 +- primitives/application-crypto/src/traits.rs | 2 +- primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/consensus/sassafras/src/digests.rs | 3 +- primitives/consensus/sassafras/src/lib.rs | 13 +- primitives/consensus/sassafras/src/ticket.rs | 31 +- primitives/core/Cargo.toml | 4 + 
primitives/core/src/bandersnatch.rs | 692 ++++++++++++++++++ primitives/core/src/crypto.rs | 6 +- primitives/core/src/lib.rs | 1 + primitives/core/src/testing.rs | 4 +- primitives/io/src/lib.rs | 14 + primitives/keyring/src/bandersnatch.rs | 241 ++++++ primitives/keyring/src/lib.rs | 4 + primitives/keystore/src/lib.rs | 45 +- primitives/keystore/src/testing.rs | 44 +- test-utils/runtime/Cargo.toml | 4 +- test-utils/runtime/src/genesismap.rs | 31 +- test-utils/runtime/src/lib.rs | 85 ++- 35 files changed, 1653 insertions(+), 346 deletions(-) create mode 100644 primitives/application-crypto/src/bandersnatch.rs create mode 100644 primitives/core/src/bandersnatch.rs create mode 100644 primitives/keyring/src/bandersnatch.rs diff --git a/Cargo.lock b/Cargo.lock index b8fea0c0568fb..9a3dddeb9ffc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -328,6 +328,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ark-ed-on-bls12-381-bandersnatch" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ff", + "ark-std", +] + [[package]] name = "ark-ff" version = "0.4.2" @@ -384,6 +396,21 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "ark-secret-scalar" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-serialize", + "ark-std", + "ark-transcript", + "digest 0.10.6", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "ark-serialize" version = "0.4.2" @@ -417,6 +444,19 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-transcript" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.6", + "rand_core 0.6.4", + "sha3", +] + [[package]] name = 
"array-bytes" version = "4.2.0" @@ -583,7 +623,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -605,7 +645,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -616,7 +656,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -670,6 +710,26 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.6", + "zeroize", +] + [[package]] name = "base-x" version = "0.2.11" @@ -757,13 +817,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.4", + "prettyplease 0.2.5", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -924,9 +984,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.1" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "byte-slice-cast" @@ -1237,9 +1297,9 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.2.1" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a19591b2ab0e3c04b588a0e04ddde7b9eaa423646d1b4a8092879216bf47473" +checksum = 
"1594fe2312ec4abf402076e407628f5c313e54c32ade058521df4ee34ecac8a8" dependencies = [ "clap 4.2.7", ] @@ -1253,7 +1313,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -1298,6 +1358,20 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#1e42bb632263f4dff86b400ec9a13af21db72360" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "concurrent-queue" version = "2.2.0" @@ -1738,7 +1812,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -1755,7 +1829,7 @@ checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2023,7 +2097,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2032,6 +2106,22 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" +[[package]] +name = "dleq_vrf" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.2", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -2097,15 +2187,16 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.16.6" +version = "0.16.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" +checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" dependencies = [ "der 
0.7.5", "digest 0.10.6", "elliptic-curve 0.13.4", "rfc6979 0.4.0", "signature 2.1.0", + "spki 0.7.2", ] [[package]] @@ -2227,7 +2318,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2361,6 +2452,19 @@ dependencies = [ "subtle", ] +[[package]] +name = "fflonk" +version = "0.1.0" +source = "git+https://github.com/w3f/fflonk#f60bc946e2a4340b1c2d00d30c654e82a5887983" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -2581,7 +2685,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.15", + "syn 2.0.16", "trybuild", ] @@ -2723,7 +2827,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2734,7 +2838,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2743,7 +2847,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -2961,7 +3065,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -3180,9 +3284,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -3205,9 +3309,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.6" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"035ef95d03713f2c347a72547b7cd38cbc9af7cd51e6099fb62d586d4a6dee3a" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", @@ -3466,12 +3570,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -3865,7 +3968,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa 0.16.6", + "ecdsa 0.16.7", "elliptic-curve 0.13.4", "once_cell", "sha2 0.10.6", @@ -4085,9 +4188,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libp2p" @@ -4429,7 +4532,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen 0.10.0", - "ring", + "ring 0.16.20", "rustls 0.20.8", "thiserror", "webpki 0.22.0", @@ -4874,6 +4977,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5431,7 +5546,7 @@ dependencies = [ [[package]] name = "node-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" dependencies = [ "clap 4.2.7", 
"frame-benchmarking", @@ -5474,7 +5589,7 @@ dependencies = [ [[package]] name = "node-sassafras-runtime" -version = "0.3.1-dev" +version = "0.3.2-dev" dependencies = [ "frame-benchmarking", "frame-executive", @@ -5754,7 +5869,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "libm 0.2.6", + "libm 0.2.7", ] [[package]] @@ -6300,7 +6415,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -7094,7 +7209,7 @@ dependencies = [ [[package]] name = "pallet-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" dependencies = [ "array-bytes 4.2.0", "frame-benchmarking", @@ -7245,7 +7360,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -7530,9 +7645,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd4572a52711e2ccff02b4973ec7e4a5b5c23387ebbfbd6cd42b34755714cefc" +checksum = "4890dcb9556136a4ec2b0c51fa4a08c8b733b829506af8fff2e853f3a065985b" dependencies = [ "blake2", "crc32fast", @@ -7725,7 +7840,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -7751,22 +7866,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = 
"39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -7910,14 +8025,14 @@ version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e" dependencies = [ - "portable-atomic 1.3.1", + "portable-atomic 1.3.2", ] [[package]] name = "portable-atomic" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bbda379e6e462c97ea6afe9f6233619b202bbc4968d7caa6917788d2070a044" +checksum = "dc59d1bcc64fc5d021d67521f818db868368028108d37f0e98d74e33f68297b5" [[package]] name = "ppv-lite86" @@ -7991,12 +8106,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "617feabb81566b593beb4886fb8c1f38064169dae4dccad0e3220160c3b37203" dependencies = [ "proc-macro2", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -8054,14 +8169,14 @@ checksum = "0e99670bafb56b9a106419397343bdbc8b8742c3cc449fec6345f86173f47cd4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "c4ec6d5fe0b140acb27c9a0444118cf55bfbb4e0b259739429abb4521dd67c16" dependencies = [ "unicode-ident", ] @@ -8222,7 +8337,7 @@ checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls 0.20.8", "slab", @@ -8372,7 +8487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "x509-parser 0.13.2", "yasna", @@ -8385,7 +8500,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "yasna", ] @@ -8436,7 +8551,7 @@ checksum = "8d2275aab483050ab2a7364c1a46604865ee7d6906684e08db0f090acf74f9e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -8526,6 +8641,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#1e42bb632263f4dff86b400ec9a13af21db72360" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -8693,7 +8823,7 @@ checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.1", "log", - "ring", + "ring 0.16.20", "sct 0.6.1", "webpki 0.21.4", ] @@ -8705,7 +8835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct 0.7.0", "webpki 0.22.0", ] @@ -8899,7 +9029,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -9350,7 +9480,7 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" dependencies = [ "async-trait", "fork-tree", @@ -10209,7 +10339,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -10277,9 +10407,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.6.0" +version = "2.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfdef77228a4c05dc94211441595746732131ad7f6530c6c18f045da7b7ab937" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "bitvec", "cfg-if", @@ -10331,7 +10461,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -10357,7 +10487,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10367,7 +10497,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10440,9 +10570,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "ca2855b3715770894e67cbfa3df957790aa0c9edc3bf06efa1a84d77fa0839d1" dependencies = [ "bitflags", "core-foundation", @@ -10453,9 +10583,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -10496,22 +10626,22 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -10705,7 +10835,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version 0.4.0", "sha2 0.10.6", "subtle", @@ -10769,7 +10899,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -10989,10 +11119,10 @@ dependencies = [ [[package]] name = "sp-consensus-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" dependencies = [ "async-trait", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "scale-info", "serde", @@ -11023,6 +11153,8 @@ name = "sp-core" version = "7.0.0" dependencies = [ "array-bytes 4.2.0", + "arrayvec 0.7.2", + "bandersnatch_vrfs", "bitflags", "blake2", "bounded-collections", @@ -11038,7 +11170,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -11086,7 +11218,7 @@ dependencies = [ "proc-macro2", "quote", "sp-core-hashing", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -11103,7 +11235,7 @@ version = "5.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -11331,7 +11463,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -11566,7 +11698,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -11733,7 
+11865,7 @@ dependencies = [ "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -12011,7 +12143,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -12067,9 +12199,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" dependencies = [ "proc-macro2", "quote", @@ -12183,7 +12315,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -12314,9 +12446,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg", "bytes", @@ -12339,7 +12471,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] @@ -12498,14 +12630,14 @@ checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -12757,7 +12889,7 @@ dependencies = [ "log", 
"md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -12890,9 +13022,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dad5567ad0cf5b760e5665964bec1b47dfd077ba8a2544b513f3556d3d239a2" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ "getrandom 0.2.9", ] @@ -13028,7 +13160,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", "wasm-bindgen-shared", ] @@ -13062,7 +13194,7 @@ checksum = "4783ce29f09b9d93134d41297aded3a712b7b979e9c6f28c32cb88c973a94869" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13200,7 +13332,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "memory_units", "num-rational", "num-traits", @@ -13214,7 +13346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624e6333e861ef49095d2d678b76ebf30b06bf37effca845be7e5b87c90071b7" dependencies = [ "downcast-rs", - "libm 0.2.6", + "libm 0.2.7", "num-traits", "paste", ] @@ -13454,7 +13586,7 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13464,7 +13596,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13493,7 +13625,7 @@ dependencies = [ "rand 0.8.5", "rcgen 0.9.3", "regex", - "ring", + "ring 0.16.20", 
"rtcp", "rtp", "rustls 0.19.1", @@ -13558,7 +13690,7 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "rcgen 0.9.3", - "ring", + "ring 0.16.20", "rustls 0.19.1", "sec1 0.3.0", "serde", @@ -14005,7 +14137,7 @@ dependencies = [ "lazy_static", "nom", "oid-registry 0.4.0", - "ring", + "ring 0.16.20", "rusticata-macros", "thiserror", "time 0.3.21", @@ -14084,7 +14216,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.16", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 712485c0f79e7..33c5397825121 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,3 +338,6 @@ inherits = "release" lto = "fat" # https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units codegen-units = 1 + +#[patch."https://github.com/w3f/ring-vrf"] +# bandersnatch_vrfs = { path = "/mnt/ssd/users/develop/w3f/ring-vrf/bandersnatch_vrfs" } diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index fddc3636477df..00bd11461ad97 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" authors = ["Parity Technologies "] description = "Node testbed for Sassafras consensus." 
homepage = "https://substrate.io/" @@ -27,8 +27,8 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } -sc-consensus-sassafras = { version = "0.3.1-dev", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } +sc-consensus-sassafras = { version = "0.3.2-dev", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.2-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } @@ -57,7 +57,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarkin frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } # Local Dependencies -node-sassafras-runtime = { version = "0.3.1-dev", path = "../runtime" } +node-sassafras-runtime = { version = "0.3.2-dev", path = "../runtime" } # CLI-specific dependencies try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 6ba72c5397715..0636078bf60dd 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -11,7 +11,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; // Genesis constants for Sassafras parameters configuration. 
-const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 32; +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 16; const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 8fe0108ae4de0..a2b99a096956c 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras-runtime" -version = "0.3.1-dev" +version = "0.3.2-dev" authors = ["Parity Technologies "] description = "Runtime testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -pallet-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../../frame/sassafras" } +pallet-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } @@ -28,7 +28,7 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} -sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = 
"../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 877c9a55a30d5..c6b5277095772 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" authors = ["Parity Technologies "] description = "Sassafras consensus algorithm for substrate" edition = "2021" @@ -32,7 +32,7 @@ sp-application-crypto = { version = "7.0.0", path = "../../../primitives/applica sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { version = "0.3.1-dev", path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.2-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-core = { version = "7.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index ee8fc50baeb92..17da58e99f2f5 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -21,10 +21,10 @@ use 
super::*; use sp_consensus_sassafras::{ - digests::PreDigest, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketClaim, TicketData, - TicketEnvelope, TicketId, + digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, + TicketClaim, TicketData, TicketEnvelope, TicketId, }; -use sp_core::{twox_64, ByteArray}; +use sp_core::{twox_64, ByteArray, ed25519}; use std::pin::Pin; @@ -41,7 +41,7 @@ pub(crate) fn secondary_authority_index( /// If ticket is `None`, then the slot should be claimed using the fallback mechanism. pub(crate) fn claim_slot( slot: Slot, - epoch: &Epoch, + epoch: &mut Epoch, maybe_ticket: Option<(TicketId, TicketData)>, keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { @@ -51,34 +51,38 @@ pub(crate) fn claim_slot( return None } + let mut vrf_sign_data = slot_claim_sign_data(&config.randomness, slot, epoch.epoch_idx); + let (authority_idx, ticket_claim) = match maybe_ticket { Some((ticket_id, ticket_data)) => { - log::debug!(target: LOG_TARGET, "[TRY PRIMARY]"); - let (authority_idx, ticket_secret) = epoch.tickets_aux.get(&ticket_id)?.clone(); + log::debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:16x})]"); + let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?.clone(); log::debug!( target: LOG_TARGET, - "Ticket = [ticket: {:x?}, auth: {}, attempt: {}]", - ticket_id, + " got ticket: auth: {}, attempt: {}", authority_idx, ticket_data.attempt_idx ); - // TODO DAVXY : using ticket_secret - let _ = ticket_secret; - let erased_signature = [0; 64]; + + vrf_sign_data.push_transcript_data(&ticket_data.encode()); + + let data = vrf_sign_data.challenge::<32>(); + let erased_pair = ed25519::Pair::from_seed(&ticket_secret.erased_secret); + let erased_signature = *erased_pair.sign(&data).as_ref(); + let claim = TicketClaim { erased_signature }; (authority_idx, Some(claim)) }, None => { - log::debug!(target: LOG_TARGET, "[TRY SECONDARY]"); + log::debug!(target: 
LOG_TARGET, "[TRY SECONDARY (slot {slot})]"); (secondary_authority_index(slot, config), None) }, }; let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; - let vrf_input = slot_claim_vrf_input(&config.randomness, slot, epoch.epoch_idx); let vrf_signature = keystore - .sr25519_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_input.into()) + .bandersnatch_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_sign_data) .ok() .flatten()?; @@ -103,7 +107,8 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec Vec Vec Vec, Epoch>, ) -> Option { - debug!(target: LOG_TARGET, "Attempting to claim slot {}", slot); - // Get the next slot ticket from the runtime. let maybe_ticket = self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; - // TODO-SASS-P2: remove me - debug!(target: LOG_TARGET, "parent {}", parent_header.hash()); + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + let mut epoch = epoch_changes.viable_epoch_mut(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))?; let claim = authorship::claim_slot( slot, - self.epoch_changes - .shared_data() - .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))? - .as_ref(), + &mut epoch.as_mut(), maybe_ticket, &self.keystore, ); @@ -282,6 +283,10 @@ where // TODO DAVXY SASS-32: this seal may be revisited. // We already have a VRF signature, this could be completelly redundant. // The header.hash() can be added to the VRF signed data. 
+ // OR maybe we can maintain this seal but compute it using some of the data in the + // pre-digest + // Another option is to not recompute this signature and push (reuse) the one in the + // pre-digest as the seal let signature = self .keystore .sign_with( diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 085f92c0ab57d..4635c77e1fa8b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -77,10 +77,10 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, - slot_claim_vrf_input, ticket_id_vrf_input, AuthorityId, AuthorityIndex, AuthorityPair, - AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, SassafrasConfiguration, - SassafrasEpochConfiguration, TicketClaim, TicketData, TicketEnvelope, TicketId, TicketSecret, - RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + slot_claim_sign_data, slot_claim_vrf_input, ticket_id_vrf_input, AuthorityId, AuthorityIndex, + AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, + SassafrasConfiguration, SassafrasEpochConfiguration, TicketClaim, TicketData, TicketEnvelope, + TicketId, TicketSecret, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; mod authorship; @@ -284,11 +284,10 @@ fn find_pre_digest(header: &B::Header) -> Result> if header.number().is_zero() { // Genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code - use sp_consensus_sassafras::VrfInput; use sp_core::crypto::VrfSecret; let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); - let input = VrfInput::new(b"", &[]); - let vrf_signature = pair.as_ref().vrf_sign(&input.into()); + let data = sp_consensus_sassafras::slot_claim_sign_data(&Default::default(), 0.into(), 0); + let vrf_signature = pair.as_ref().vrf_sign(&data); return Ok(PreDigest { authority_idx: 0, 
slot: 0.into(), ticket_claim: None, vrf_signature }) } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index f4e93798a1660..1438962d960f7 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -34,8 +34,8 @@ use sc_network_test::*; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; -use sp_consensus_sassafras::{inherents::InherentDataProvider, slot_claim_vrf_input}; -use sp_keyring::Sr25519Keyring; +use sp_consensus_sassafras::inherents::InherentDataProvider; +use sp_keyring::BandersnatchKeyring as Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{Digest, DigestItem}; use sp_timestamp::Timestamp; @@ -172,9 +172,11 @@ fn create_test_block_import( .expect("can initialize block-import") } -fn create_test_keystore(authority: Sr25519Keyring) -> KeystorePtr { +fn create_test_keystore(authority: Keyring) -> KeystorePtr { let keystore = MemoryKeystore::new(); - keystore.sr25519_generate_new(SASSAFRAS, Some(&authority.to_seed())).unwrap(); + keystore + .bandersnatch_generate_new(SASSAFRAS, Some(&authority.to_seed())) + .unwrap(); keystore.into() } @@ -183,9 +185,9 @@ fn create_test_config() -> SassafrasConfiguration { slot_duration: SLOT_DURATION, epoch_duration: EPOCH_DURATION, authorities: vec![ - (Sr25519Keyring::Alice.public().into(), 1), - (Sr25519Keyring::Bob.public().into(), 1), - (Sr25519Keyring::Charlie.public().into(), 1), + (Keyring::Alice.public().into(), 1), + (Keyring::Bob.public().into(), 1), + (Keyring::Charlie.public().into(), 1), ], randomness: [0; 32], threshold_params: SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, @@ -205,7 +207,7 @@ impl TestContext { let (block_import, link) = create_test_block_import(client.clone(), config.clone()); // Create a keystore with default testing key - let 
keystore = create_test_keystore(Sr25519Keyring::Alice); + let keystore = create_test_keystore(Keyring::Alice); let verifier = create_test_verifier(client.clone(), &link, config.clone()); @@ -289,7 +291,7 @@ impl TestContext { let parent_header = self.client.header(parent_hash).unwrap().unwrap(); let parent_number = *parent_header.number(); - let public = self.keystore.sr25519_public_keys(SASSAFRAS)[0]; + let public = self.keystore.bandersnatch_public_keys(SASSAFRAS)[0]; let proposer = block_on(self.init(&parent_header)).unwrap(); @@ -298,14 +300,12 @@ impl TestContext { parent_pre_digest.slot + 1 }); + // TODO DAVXY: here maybe we can use the epoch.randomness??? let epoch = self.epoch_data(&parent_hash, parent_number, slot); - let vrf_input = - slot_claim_vrf_input(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); - let vrf_signature = self - .keystore - .sr25519_vrf_sign(SASSAFRAS, &public, &vrf_input.into()) - .unwrap() - .unwrap(); + let data = + slot_claim_sign_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let vrf_signature = + self.keystore.bandersnatch_vrf_sign(SASSAFRAS, &public, &data).unwrap().unwrap(); let pre_digest = PreDigest { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; let digest = sp_runtime::generic::Digest { @@ -320,7 +320,7 @@ impl TestContext { let hash = block.header.hash(); let signature = self .keystore - .sr25519_sign(SASSAFRAS, &public, hash.as_ref()) + .bandersnatch_sign(SASSAFRAS, &public, hash.as_ref()) .unwrap() .unwrap() .try_into() @@ -372,9 +372,9 @@ fn claim_secondary_slots_works() { let mut config = create_test_config(); config.randomness = [2; 32]; - let authorities = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; + let authorities = [Keyring::Alice, Keyring::Bob, Keyring::Charlie]; - let epoch = Epoch { + let mut epoch = Epoch { epoch_idx: 1, start_slot: 6.into(), config: config.clone(), @@ -388,7 +388,7 @@ fn claim_secondary_slots_works() { for slot in 
0..config.epoch_duration { if let Some((claim, auth_id2)) = - authorship::claim_slot(slot.into(), &epoch, None, &keystore) + authorship::claim_slot(slot.into(), &mut epoch, None, &keystore) { assert_eq!(claim.authority_idx as usize, auth_idx); assert_eq!(claim.slot, Slot::from(slot)); @@ -422,41 +422,44 @@ fn claim_primary_slots_works() { tickets_aux: Default::default(), }; - let keystore = create_test_keystore(Sr25519Keyring::Alice); + let keystore = create_test_keystore(Keyring::Alice); + let alice_authority_idx = 0_u32; - // Success if we have ticket aux data and the authority key in our keystore - // ticket-aux: OK , authority-key: OK => SUCCESS - - let authority_idx = 0u32; let ticket_id = 123; let ticket_data = TicketData { attempt_idx: 0, erased_public: [0; 32], revealed_public: [0; 32] }; let ticket_secret = TicketSecret { attempt_idx: 0, erased_secret: [0; 32] }; - epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret.clone())); - - let (pre_digest, auth_id) = - authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data.clone())), &keystore) - .unwrap(); - - assert_eq!(pre_digest.authority_idx, authority_idx); - assert_eq!(auth_id, Sr25519Keyring::Alice.public().into()); // Fail if we have authority key in our keystore but not ticket aux data // ticket-aux: KO , authority-key: OK => FAIL - let ticket_id = 321; let claim = - authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data.clone())), &keystore); + authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data.clone())), &keystore); + assert!(claim.is_none()); + assert!(epoch.tickets_aux.is_empty()); + + // Success if we have ticket aux data and the authority key in our keystore + // ticket-aux: OK , authority-key: OK => SUCCESS + + epoch.tickets_aux.insert(ticket_id, (alice_authority_idx, ticket_secret.clone())); + + let (pre_digest, auth_id) = + authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data.clone())), &keystore) + .unwrap(); 
+ + assert!(epoch.tickets_aux.is_empty()); + assert_eq!(pre_digest.authority_idx, alice_authority_idx); + assert_eq!(auth_id, Keyring::Alice.public().into()); // Fail if we have ticket aux data but not the authority key in out keystore // ticket-aux: OK , authority-key: KO => FAIL - let authority_idx = 1u32; // we don't have this key - let ticket_id = 666; - epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret)); - let claim = authorship::claim_slot(0.into(), &epoch, Some((ticket_id, ticket_data)), &keystore); + epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret)); + + let claim = authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data)), &keystore); assert!(claim.is_none()); + assert!(epoch.tickets_aux.is_empty()); } #[test] @@ -839,7 +842,7 @@ async fn sassafras_network_progress() { let net = SassafrasTestNet::new(3); let net = Arc::new(Mutex::new(net)); - let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; + let peers = [Keyring::Alice, Keyring::Bob, Keyring::Charlie]; let mut import_notifications = Vec::new(); let mut sassafras_workers = Vec::new(); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index f83a4223ca05c..5a5dfc457ef80 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -20,7 +20,7 @@ use super::*; use sp_application_crypto::Wraps; -use sp_core::crypto::VrfPublic; +use sp_core::{crypto::VrfPublic, ed25519}; // Allowed slot drift. 
const MAX_SLOT_DRIFT: u64 = 1; @@ -81,35 +81,35 @@ fn check_header( None => return Err(sassafras_err(Error::SlotAuthorNotFound)), }; - // Check header signature + // Check header signature (aka the Seal) - // Check slot-vrf proof + let signature = seal + .as_sassafras_seal() + .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; - let vrf_input = slot_claim_vrf_input(&config.randomness, pre_digest.slot, epoch.epoch_idx); - if !authority_id - .as_inner_ref() - .vrf_verify(&vrf_input.into(), &pre_digest.vrf_signature) - { - return Err(sassafras_err(Error::VrfVerificationFailed)) + let pre_hash = header.hash(); + if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) } - // let signature = seal - // .as_sassafras_seal() - // .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; - - // let pre_hash = header.hash(); - // if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { - // return Err(sassafras_err(Error::BadSignature(pre_hash))) - // } + // Optionally check ticket ownership - // Check authorship method and claim + let mut vrf_sign_data = slot_claim_sign_data(&config.randomness, pre_digest.slot, epoch.epoch_idx); match (&maybe_ticket, &pre_digest.ticket_claim) { (Some((_ticket_id, ticket_data)), Some(ticket_claim)) => { log::debug!(target: LOG_TARGET, "checking primary"); - // TODO DAVXY: check erased_signature - let _public = ticket_data.erased_public; - let _signature = ticket_claim.erased_signature; + + vrf_sign_data.push_transcript_data(&ticket_data.encode()); + let challenge = vrf_sign_data.challenge::<32>(); + + let erased_public = ed25519::Public::from_raw(ticket_data.erased_public); + let erased_signature = + ed25519::Signature::from_raw(ticket_claim.erased_signature); + + if !ed25519::Pair::verify(&erased_signature, &challenge, &erased_public) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) + } }, (None, None) => { 
log::debug!(target: LOG_TARGET, "checking secondary"); @@ -130,6 +130,12 @@ fn check_header( }, } + // Check per-slot vrf proof + + if !authority_id.as_inner_ref().vrf_verify(&vrf_sign_data, &pre_digest.vrf_signature) { + return Err(sassafras_err(Error::VrfVerificationFailed)) + } + let info = VerifiedHeaderInfo { authority_id, seal }; Ok(CheckedHeader::Checked(header, info)) diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 4167e486ecf62..0fdbb24bf70b7 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -19,12 +19,13 @@ use parking_lot::RwLock; use sp_application_crypto::{AppCrypto, AppPair, IsWrappedBy}; -#[cfg(feature = "bls-experimental")] -use sp_core::{bls377, bls381}; use sp_core::{ + bandersnatch, crypto::{ByteArray, ExposeSecret, KeyTypeId, Pair as CorePair, SecretString, VrfSecret}, ecdsa, ed25519, sr25519, }; +#[cfg(feature = "bls-experimental")] +use sp_core::{bls377, bls381}; use sp_keystore::{Error as TraitError, Keystore, KeystorePtr}; use std::{ collections::HashMap, @@ -234,6 +235,48 @@ impl Keystore for LocalKeystore { Ok(sig) } + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + self.generate_new::(key_type, seed) + } + + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> std::result::Result, TraitError> { + self.sign::(key_type, public, msg) + } + + // TODO DAVXY + // Maybe we can expose just this bandersnatch sign (the above one reduces to this with + // input len = 0) + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + ) -> std::result::Result, TraitError> { + self.vrf_sign::(key_type, public, data) + } + + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: 
&bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> std::result::Result, TraitError> { + self.vrf_output::(key_type, public, input) + } + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 41b50759f39cf..592d50b33be0e 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -22,7 +22,7 @@ pallet-session = { version = "4.0.0-dev", default-features = false, path = "../s pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 3df4095335e3e..39a24041499f0 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -56,7 +56,7 @@ use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, SassafrasConfiguration, SassafrasEpochConfiguration, Slot, TicketData, TicketEnvelope, - 
TicketId, RANDOMNESS_LENGTH, RANDOMNESS_VRF_CONTEXT, SASSAFRAS_ENGINE_ID, + TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -80,7 +80,9 @@ pub mod session; // Re-export pallet symbols. pub use pallet::*; -const LOG_TARGET: &str = "runtime::sassafras 🌳"; +const LOG_TARGET: &str = "sassafras::runtime 🌳"; + +const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasRandomness"; /// Tickets related metadata that is commonly used together. #[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] @@ -294,38 +296,27 @@ pub mod pallet { /// Block finalization fn on_finalize(_now: BlockNumberFor) { + // TODO DAVXY: check if is a disabled validator? + // At the end of the block, we can safely include the new VRF output from // this block into the randomness accumulator. If we've determined // that this block was the first in a new epoch, the changeover logic has - // already occurred at this point, so the + // already occurred at this point. let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - // TODO DAVXY P32: probably with the new vrf we don't need the authority id - // `inout` is sufficent - let authority_idx = pre_digest.authority_idx; - let authorities = Authorities::::get(); - let authority_id = authorities - .get(authority_idx as usize) - .expect("Authority should be valid at this point; qed"); - - // TODO DAVXY: check if is a disabled validator - let vrf_input = sp_consensus_sassafras::slot_claim_vrf_input( &Self::randomness(), CurrentSlot::::get(), EpochIndex::::get(), ); - let vrf_preout = &pre_digest.vrf_signature.output; - - let randomness = vrf_preout - .make_bytes::( - RANDOMNESS_VRF_CONTEXT, - &vrf_input, - authority_id.0.as_ref(), - ) - .expect("Can't fail? 
TODO DAVXY"); + let randomness = pre_digest + .vrf_signature + .vrf_outputs + .get(0) + .expect("vrf preout should have been already checked by the client; qed") + .make_bytes::(RANDOMNESS_VRF_CONTEXT, &vrf_input); Self::deposit_randomness(&randomness); @@ -362,7 +353,7 @@ pub mod pallet { ) -> DispatchResult { ensure_none(origin)?; - log::debug!(target: LOG_TARGET, "@@@@@@@@@@ received {} tickets", tickets.len()); + log::debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); // Check tickets score let next_auth = NextAuthorities::::get(); @@ -376,8 +367,9 @@ pub mod pallet { next_auth.len() as u32, ); - let epoch_idx = EpochIndex::::get(); - let randomness = CurrentRandomness::::get(); + // Get next epoch params + let randomness = NextRandomness::::get(); + let epoch_idx = EpochIndex::::get() + 1; let mut segment = BoundedVec::with_max_capacity(); for ticket in tickets.iter() { @@ -396,7 +388,8 @@ pub mod pallet { } if !segment.is_empty() { - log::debug!(target: LOG_TARGET, "@@@@@@@@@@ appending segment with {} tickets", segment.len()); + log::debug!(target: LOG_TARGET, "Appending segment with {} tickets", segment.len()); + segment.iter().for_each(|t| log::debug!(target: LOG_TARGET, " + {t:16x}")); let mut metadata = TicketsMeta::::get(); NextTicketsSegments::::insert(metadata.segments_count, segment); metadata.segments_count += 1; diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 337a8d346cef2..5dde8abc11abd 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -182,8 +182,8 @@ fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { randomness = crate::NextRandomness::::get(); } - let vrf_input = sp_consensus_sassafras::slot_claim_vrf_input(&randomness, slot, epoch); - pair.as_ref().vrf_sign(&vrf_input.into()) + let data = sp_consensus_sassafras::slot_claim_sign_data(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&data) } /// Produce a `PreDigest` instance for the given 
parameters. diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index dbb0d3af87cb9..c9e26f80fba04 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -149,7 +149,7 @@ fn on_first_block_after_genesis() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("c3bcc82b9636bf12a9ba858ea6855b0b5a7a57803370e57cd87223f9d8d1a896"), + h2b("eb169de47822691578f74204ace5bc57c38f86f97e15a8abf71114541e7ca9e8"), ); // Header data check @@ -201,7 +201,7 @@ fn on_normal_block() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("c3bcc82b9636bf12a9ba858ea6855b0b5a7a57803370e57cd87223f9d8d1a896"), + h2b("eb169de47822691578f74204ace5bc57c38f86f97e15a8abf71114541e7ca9e8"), ); let header = finalize_block(end_block); @@ -219,7 +219,7 @@ fn on_normal_block() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("a44c15061d80d1f1b58abb3e002b9bd2d7135b3c8bef95a3af2ae5079a901135"), + h2b("c5e06d78bf5351b3a740c6838976e571ee14c595a206278f3f4ce0157f538318"), ); // Header data check @@ -257,12 +257,12 @@ fn produce_epoch_change_digest() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("fec42ab12d7497cc8863b078774560790a5f1ee38d2b3a6b7448c4cc318c6e24"), + h2b("a7abdd705eb72383f60f6f093dea4bbfb65a1992099b4928ca30076f71a73682"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("ba92c7ea134d29bd4c663e9a5811c0c76972606acbfdad354ab3cc9d400f756c"), + h2b("a9d8fc258ba0274d7815664b4e153904c44d2e850e98cffc0ba03ea018611348"), ); let header = finalize_block(end_block); @@ -279,12 +279,12 @@ fn produce_epoch_change_digest() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("fec42ab12d7497cc8863b078774560790a5f1ee38d2b3a6b7448c4cc318c6e24"), + 
h2b("a7abdd705eb72383f60f6f093dea4bbfb65a1992099b4928ca30076f71a73682"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("cea876f919ae1f6cdc8a93e91199d75bd162fb0b930df7168a66cdafc3ddd23c"), + h2b("53b4e087baba183a2973552ba57b6c8f489959c8e5f838d59884d37c6d494e2f"), ); // Header data check @@ -378,11 +378,11 @@ fn submit_segments_works() { assert_eq!(meta.segments_count, segments_count); assert_eq!(meta.tickets_count, [0, 0]); let seg = NextTicketsSegments::::get(0); - assert_eq!(seg.len(), 5); + assert_eq!(seg.len(), 3); let seg = NextTicketsSegments::::get(1); - assert_eq!(seg.len(), 6); + assert_eq!(seg.len(), 5); let seg = NextTicketsSegments::::get(2); - assert_eq!(seg.len(), 4); + assert_eq!(seg.len(), 5); }) } @@ -525,8 +525,8 @@ fn submit_enact_claim_tickets() { let mut expected_ids: Vec<_> = tickets .iter() .map(|t| { - let epoch_idx = Sassafras::epoch_index(); - let randomness = Sassafras::randomness(); + let epoch_idx = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( &randomness, t.data.attempt_idx, diff --git a/primitives/application-crypto/src/bandersnatch.rs b/primitives/application-crypto/src/bandersnatch.rs new file mode 100644 index 0000000000000..68adee8061c00 --- /dev/null +++ b/primitives/application-crypto/src/bandersnatch.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Bandersnatch VRF application crypto types. + +use crate::{KeyTypeId, RuntimePublic}; +pub use sp_core::bandersnatch::*; +use sp_std::vec::Vec; + +mod app { + crate::app_crypto!(super, sp_core::testing::BANDERSNATCH); +} + +#[cfg(feature = "full_crypto")] +pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; + +impl RuntimePublic for Public { + type Signature = Signature; + + fn all(_key_type: KeyTypeId) -> Vec { + // sp_io::crypto::bandersnatch_public_keys(key_type) + unimplemented!() + } + + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::bandersnatch_generate(key_type, seed) + } + + fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { + // sp_io::crypto::bandersnatch_sign(key_type, self, msg.as_ref()) + unimplemented!() + } + + fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { + // sp_io::crypto::bandersnatch_verify(signature, msg.as_ref(), self) + unimplemented!() + } + + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::ByteArray::to_raw_vec(self) + } +} diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 3e8f2f5a77b3a..2c4f4835bd20a 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -41,6 +41,11 @@ pub use serde; #[doc(hidden)] pub use sp_std::{ops::Deref, vec::Vec}; +pub use traits::*; + +mod traits; + +pub mod bandersnatch; #[cfg(feature = "bls-experimental")] pub mod bls377; #[cfg(feature = "bls-experimental")] @@ -48,9 +53,6 @@ pub mod 
bls381; pub mod ecdsa; pub mod ed25519; pub mod sr25519; -mod traits; - -pub use traits::*; /// Declares `Public`, `Pair` and `Signature` types which are functionally equivalent /// to the corresponding types defined by `$module` but are new application-specific diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index 88d4bf36915d0..d8869f19d0dab 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -31,7 +31,7 @@ use sp_std::{fmt::Debug, vec::Vec}; /// Typically, the implementers of this trait are its associated types themselves. /// This provides a convenient way to access generic information about the scheme /// given any of the associated types. -pub trait AppCrypto: 'static + Send + Sync + Sized + CryptoType + Clone { +pub trait AppCrypto: 'static + Send + Sized + CryptoType + Clone { /// Identifier for application-specific key type. const ID: KeyTypeId; diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 3a58dd5a8f5fa..73de4164041a3 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "0.3.1-dev" +version = "0.3.2-dev" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2021" diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index d44d227b7ed9a..0ffc5e998223c 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -19,13 +19,12 @@ use super::{ ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, - SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, SASSAFRAS_ENGINE_ID, + SassafrasAuthorityWeight, SassafrasEpochConfiguration, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, }; use 
scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_core::sr25519::vrf::VrfSignature; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 6838f4d175762..4ee21f5dd3f64 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -32,20 +32,20 @@ use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; -pub use sp_core::sr25519::vrf::{VrfInput, VrfOutput, VrfProof, VrfSignData, VrfSignature}; +pub use sp_core::bandersnatch::vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}; pub mod digests; pub mod inherents; pub mod ticket; pub use ticket::{ - slot_claim_vrf_input, ticket_id, ticket_id_threshold, ticket_id_vrf_input, TicketClaim, - TicketData, TicketEnvelope, TicketId, TicketSecret, + slot_claim_sign_data, slot_claim_vrf_input, ticket_id, ticket_id_threshold, + ticket_id_vrf_input, TicketClaim, TicketData, TicketEnvelope, TicketId, TicketSecret, }; mod app { - use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; - app_crypto!(sr25519, SASSAFRAS); + use sp_application_crypto::{app_crypto, bandersnatch, key_types::SASSAFRAS}; + app_crypto!(bandersnatch, SASSAFRAS); } /// Key type for Sassafras protocol. @@ -54,9 +54,6 @@ pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; /// Consensus engine identifier. pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; -/// VRF context used for per-slot randomness generation. -pub const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasRandomnessVRFContext"; - /// VRF output length for per-slot randomness. 
pub const RANDOMNESS_LENGTH: usize = 32; diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 9f37198314ce5..0cedae3541809 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -21,10 +21,7 @@ use super::{Randomness, SASSAFRAS_ENGINE_ID}; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_consensus_slots::Slot; -use sp_core::sr25519::vrf::{VrfInput, VrfOutput}; - -/// VRF context used for ticket-id generation. -const TICKET_ID_VRF_CONTEXT: &[u8] = b"SassafrasTicketIdVRFContext"; +use sp_core::bandersnatch::vrf::{VrfInput, VrfOutput, VrfSignData}; /// Ticket identifier. /// @@ -82,14 +79,24 @@ pub fn slot_claim_vrf_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput::new( &SASSAFRAS_ENGINE_ID, &[ - (b"type", b"ticket-claim-transcript"), + (b"type", b"slot-claim"), + (b"randomness", randomness), (b"slot", &slot.to_le_bytes()), (b"epoch", &epoch.to_le_bytes()), - (b"randomness", randomness), ], ) } +/// Signing-data to claim slot ownership during block production. +/// +/// Input randomness is current epoch randomness. +pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { + let vrf_input = slot_claim_vrf_input(randomness, slot, epoch); + + VrfSignData::from_iter(&SASSAFRAS_ENGINE_ID, &[b"slot-claim-transcript"], [vrf_input]) + .expect("can't fail; qed") +} + /// VRF input to generate the ticket id. /// /// Input randomness is current epoch randomness. 
@@ -97,10 +104,10 @@ pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput::new( &SASSAFRAS_ENGINE_ID, &[ - (b"type", b"ticket-id-transcript"), + (b"type", b"ticket-id"), + (b"randomness", randomness), (b"attempt", &attempt.to_le_bytes()), (b"epoch", &epoch.to_le_bytes()), - (b"randomness", randomness), ], ) } @@ -109,13 +116,9 @@ pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> /// /// Input generally obtained via `ticket_id_vrf_input`. /// Output can be obtained directly using the vrf secret key or from the signature. -// TODO DAVXY: with new VRF authority-id is not necessary pub fn ticket_id(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> TicketId { - let public = sp_core::sr25519::Public::from_raw([0; 32]); - vrf_output - .make_bytes::<16>(TICKET_ID_VRF_CONTEXT, vrf_input, &public) - .map(|bytes| u128::from_le_bytes(bytes)) - .unwrap_or(u128::MAX) + let bytes = vrf_output.make_bytes::<16>(b"vrf-out", vrf_input); + u128::from_le_bytes(bytes) } /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index f99ed6c53efc6..8a431ed34e954 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -13,6 +13,8 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", default-features = false } +arrayvec = { version = "0.7.2", default-features = false } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive","max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } @@ -72,12 +74,14 @@ bench = false [features] default = ["std"] std = [ + "arrayvec/std", "merlin/std", "full_crypto", "log/std", "thiserror", "lazy_static", "parking_lot", + 
"bandersnatch_vrfs/getrandom", "bounded-collections/std", "primitive-types/std", "primitive-types/serde", diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs new file mode 100644 index 0000000000000..f0af2ae705911 --- /dev/null +++ b/primitives/core/src/bandersnatch.rs @@ -0,0 +1,692 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! TODO DOCS. + +// #![allow(unused)] + +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ + ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic, +}; +#[cfg(feature = "full_crypto")] +use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; + +#[cfg(feature = "full_crypto")] +use bandersnatch_vrfs::SecretKey; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use sp_runtime_interface::pass_by::PassByInner; +use sp_std::vec::Vec; + +/// Identifier used to match public keys against bandersnatch-vrf keys. +pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bs38"); + +#[cfg(feature = "full_crypto")] +const SIGNING_CTX: &[u8] = b"SigningContext"; +#[cfg(feature = "full_crypto")] +const SEED_SERIALIZED_LEN: usize = 32; +const PUBLIC_SERIALIZED_LEN: usize = 32; +const SIGNATURE_SERIALIZED_LEN: usize = 64; + +/// XXX. 
+#[cfg_attr(feature = "full_crypto", derive(Hash))] +#[derive( + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Encode, + Decode, + PassByInner, + MaxEncodedLen, + TypeInfo, +)] +pub struct Public(pub [u8; PUBLIC_SERIALIZED_LEN]); + +impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_LEN]> for Public { + fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_LEN]) -> Self { + Public(raw) + } +} + +impl AsRef<[u8; PUBLIC_SERIALIZED_LEN]> for Public { + fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_LEN] { + &self.0 + } +} + +impl AsRef<[u8]> for Public { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsMut<[u8]> for Public { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl TryFrom<&[u8]> for Public { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != PUBLIC_SERIALIZED_LEN { + return Err(()) + } + let mut r = [0u8; PUBLIC_SERIALIZED_LEN]; + r.copy_from_slice(data); + Ok(Self::unchecked_from(r)) + } +} + +impl ByteArray for Public { + const LEN: usize = PUBLIC_SERIALIZED_LEN; +} + +impl TraitPublic for Public {} + +impl CryptoType for Public { + #[cfg(feature = "full_crypto")] + type Pair = Pair; +} + +impl Derive for Public {} + +impl sp_std::fmt::Debug for Public { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +/// TODO davxy: DOCS +#[cfg_attr(feature = "full_crypto", derive(Hash))] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] +pub struct Signature([u8; SIGNATURE_SERIALIZED_LEN]); + +impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_LEN]> for Signature { + fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_LEN]) -> Self { + Signature(raw) + } +} + +impl AsRef<[u8]> for Signature { + fn 
as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsMut<[u8]> for Signature { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } +} + +impl TryFrom<&[u8]> for Signature { + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() != SIGNATURE_SERIALIZED_LEN { + return Err(()) + } + let mut r = [0u8; SIGNATURE_SERIALIZED_LEN]; + r.copy_from_slice(data); + Ok(Self::unchecked_from(r)) + } +} + +impl ByteArray for Signature { + const LEN: usize = SIGNATURE_SERIALIZED_LEN; +} + +impl CryptoType for Signature { + #[cfg(feature = "full_crypto")] + type Pair = Pair; +} + +impl sp_std::fmt::Debug for Signature { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +/// The raw secret seed, which can be used to recreate the `Pair`. +#[cfg(feature = "full_crypto")] +type Seed = [u8; SEED_SERIALIZED_LEN]; + +/// TODO davxy: DOCS +#[cfg(feature = "full_crypto")] +#[derive(Clone)] +pub struct Pair(SecretKey); + +#[cfg(feature = "full_crypto")] +impl TraitPair for Pair { + type Seed = Seed; + type Public = Public; + type Signature = Signature; + + /// Make a new key pair from secret seed material. + /// + /// The slice must be 64 bytes long or it will return an error. + fn from_seed_slice(seed_slice: &[u8]) -> Result { + if seed_slice.len() != SEED_SERIALIZED_LEN { + return Err(SecretStringError::InvalidSeedLength) + } + let mut seed_raw = [0; SEED_SERIALIZED_LEN]; + seed_raw.copy_from_slice(seed_slice); + let secret = SecretKey::from_seed(&seed_raw); + Ok(Pair(secret)) + } + + /// Derive a child key from a series of given (hard) junctions. + /// + /// Soft junctions are not supported. + fn derive>( + &self, + path: Iter, + _seed: Option, + ) -> Result<(Pair, Option), DeriveError> { + // TODO davxy is this good? 
+ let derive_hard_junction = |secret_seed, cc| -> Seed { + ("bandersnatch-vrf-HDKD", secret_seed, cc).using_encoded(sp_core_hashing::blake2_256) + }; + + let mut acc = [0; SEED_SERIALIZED_LEN]; + for j in path { + match j { + DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), + DeriveJunction::Hard(cc) => acc = derive_hard_junction(acc, cc), + } + } + Ok((Self::from_seed(&acc), Some(acc))) + } + + /// Get the public key. + fn public(&self) -> Public { + let public = self.0.to_public(); + let mut raw = [0; PUBLIC_SERIALIZED_LEN]; + public.0.serialize(raw.as_mut_slice()).expect("key buffer length is good; qed"); + Public::unchecked_from(raw) + } + + /// Sign raw data. + fn sign(&self, data: &[u8]) -> Signature { + let data = vrf::VrfSignData::new(SIGNING_CTX, &[data], vrf::VrfIosVec::default()); + self.vrf_sign(&data).signature + } + + /// Verify a signature on a message. + /// + /// Returns true if the signature is good. + fn verify>(signature: &Self::Signature, data: M, public: &Self::Public) -> bool { + let data = vrf::VrfSignData::new(SIGNING_CTX, &[data.as_ref()], vrf::VrfIosVec::default()); + let signature = vrf::VrfSignature { + signature: signature.clone(), + vrf_outputs: vrf::VrfIosVec::default(), + }; + public.vrf_verify(&data, &signature) + } + + /// Return a vec filled with seed raw data. + fn to_raw_vec(&self) -> Vec { + // TODO davxy: makes sense??? Should we returne the seed or serialized secret key? + // If we return the serialized secret there is no method to reconstruct if ... + // unimplemented!() + panic!() + } +} + +#[cfg(feature = "full_crypto")] +impl CryptoType for Pair { + type Pair = Pair; +} + +/// VRF related types and operations. 
+pub mod vrf { + use super::*; + use crate::{bounded::BoundedVec, crypto::VrfCrypto, ConstU32}; + use bandersnatch_vrfs::{ + CanonicalDeserialize, CanonicalSerialize, IntoVrfInput, Message, PublicKey, + ThinVrfSignature, Transcript, + }; + + const PREOUT_SERIALIZED_LEN: usize = 32; + + /// Max number of VRF inputs/outputs + pub const MAX_VRF_IOS: u32 = 3; + + pub(super) type VrfIosVec = BoundedVec>; + + /// Input to be used for VRF sign and verify operations. + #[derive(Clone)] + pub struct VrfInput(pub(super) bandersnatch_vrfs::VrfInput); + + impl VrfInput { + /// Build a new VRF input. + /// + /// Each message tuple has the form: (domain, data). + // TODO: Maybe we should access directly the transcript. + // I see a commented method in bandersnatch_vrfs crate that fullfil what we need... + pub fn new(label: &'static [u8], messages: &[(&[u8], &[u8])]) -> Self { + let _ = label; + let mut buf = Vec::new(); + messages.into_iter().for_each(|(domain, message)| { + buf.extend_from_slice(domain); + buf.extend_from_slice(message); + }); + let msg = Message { domain: b"TODO-DAVXY-FIXME", message: buf.as_slice() }; + VrfInput(msg.into_vrf_input()) + } + } + + /// TODO davxy docs + #[derive(Clone, Debug, PartialEq, Eq)] + pub struct VrfOutput(pub(super) bandersnatch_vrfs::VrfPreOut); + + impl Encode for VrfOutput { + fn encode(&self) -> Vec { + let mut bytes = [0; PREOUT_SERIALIZED_LEN]; + self.0 + .serialize_compressed(bytes.as_mut_slice()) + .expect("preout serialization can't fail"); + bytes.encode() + } + } + + impl Decode for VrfOutput { + fn decode(i: &mut R) -> Result { + let buf = <[u8; PREOUT_SERIALIZED_LEN]>::decode(i)?; + let preout = bandersnatch_vrfs::VrfPreOut::deserialize_compressed(buf.as_slice()) + .map_err(|_| "vrf-preout decode error: bad preout")?; + Ok(VrfOutput(preout)) + } + } + + impl MaxEncodedLen for VrfOutput { + fn max_encoded_len() -> usize { + <[u8; PREOUT_SERIALIZED_LEN]>::max_encoded_len() + } + } + + impl TypeInfo for VrfOutput { + type 
Identity = [u8; PREOUT_SERIALIZED_LEN]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// TODO davxy docs + pub struct VrfSignData { + /// Associated Fiat-Shamir transcript + pub transcript: Transcript, + /// VRF inputs to be signed. + pub vrf_inputs: VrfIosVec, + } + + impl VrfSignData { + /// Construct a new data to be signed. + pub fn new>>( + label: &'static [u8], + transcript_data: &[&[u8]], + vrf_inputs: T, + ) -> Self { + let mut transcript = Transcript::new_labeled(label); + transcript_data.iter().for_each(|data| transcript.append_slice(data)); + VrfSignData { transcript, vrf_inputs: vrf_inputs.into() } + } + + /// Construct a new data to be signed from an iterator of `VrfInputs`. + /// + /// Returns `Err` if the `vrf_inputs` yields more elements than `MAX_VRF_IOS` + pub fn from_iter>( + label: &'static [u8], + transcript_data: &[&[u8]], + vrf_inputs: T, + ) -> Result { + let vrf_inputs: Vec = vrf_inputs.into_iter().collect(); + let bounded = VrfIosVec::try_from(vrf_inputs).map_err(|_| ())?; + Ok(Self::new(label, transcript_data, bounded)) + } + + /// Appends a message to the transcript + pub fn push_transcript_data( + &mut self, + data: &[u8], + ) { + self.transcript.append_slice(data); + } + + /// Appends a `VrfInput` to the vrf inputs to be signed. + /// On failure, returns the `VrfInput`. + pub fn push_vrf_input( + &mut self, + vrf_input: VrfInput, + ) -> Result<(), VrfInput> { + self.vrf_inputs.try_push(vrf_input) + } + + /// Create challenge from input transcript within the signing data. + pub fn challenge(&self) -> [u8; N] { + let mut output = [0; N]; + let mut t = self.transcript.clone(); + let mut reader = t.challenge(b"Prehashed for Ed25519"); + reader.read_bytes(&mut output); + output + } + } + + /// VRF signature. 
+ #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct VrfSignature { + /// VRF signature + pub signature: Signature, + /// VRF pre-outputs + pub vrf_outputs: VrfIosVec, + } + + #[cfg(feature = "full_crypto")] + impl VrfCrypto for Pair { + type VrfInput = VrfInput; + type VrfOutput = VrfOutput; + type VrfSignData = VrfSignData; + type VrfSignature = VrfSignature; + } + + #[cfg(feature = "full_crypto")] + impl VrfSecret for Pair { + fn vrf_sign(&self, data: &Self::VrfSignData) -> Self::VrfSignature { + // Hack used because backend signature type is generic over the number of ios + // @burdges can we provide a vec or boxed version? + match data.vrf_inputs.len() { + 0 => self.vrf_sign_gen::<0>(data), + 1 => self.vrf_sign_gen::<1>(data), + 2 => self.vrf_sign_gen::<2>(data), + 3 => self.vrf_sign_gen::<3>(data), + _ => panic!("Max VRF inputs is set to: {}", MAX_VRF_IOS), + } + } + + fn vrf_output(&self, input: &Self::VrfInput) -> Self::VrfOutput { + let output = self.0 .0.vrf_preout(&input.0); + VrfOutput(output) + } + } + + impl VrfCrypto for Public { + type VrfInput = VrfInput; + type VrfOutput = VrfOutput; + type VrfSignData = VrfSignData; + type VrfSignature = VrfSignature; + } + + impl VrfPublic for Public { + fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool { + let preouts_len = signature.vrf_outputs.len(); + if preouts_len != data.vrf_inputs.len() { + return false + } + // Hack used because backend signature type is generic over the number of ios + // @burdges can we provide a vec or boxed version? 
+ match preouts_len { + 0 => self.vrf_verify_gen::<0>(data, signature), + 1 => self.vrf_verify_gen::<1>(data, signature), + 2 => self.vrf_verify_gen::<2>(data, signature), + 3 => self.vrf_verify_gen::<3>(data, signature), + _ => panic!("Max VRF input messages is set to: {}", MAX_VRF_IOS), + } + } + } + + #[cfg(feature = "full_crypto")] + impl Pair { + fn vrf_sign_gen(&self, data: &VrfSignData) -> VrfSignature { + let ios: Vec<_> = data + .vrf_inputs + .iter() + .map(|i| self.0.clone().0.vrf_inout(i.0.clone())) + .collect(); + + let signature: ThinVrfSignature = + self.0.sign_thin_vrf(data.transcript.clone(), ios.as_slice()); + + let mut sign_bytes = [0; SIGNATURE_SERIALIZED_LEN]; + signature + .signature + .serialize_compressed(sign_bytes.as_mut_slice()) + .expect("serialization can't fail"); + + let outputs: Vec<_> = signature.preoutputs.into_iter().map(VrfOutput).collect(); + let outputs = VrfIosVec::truncate_from(outputs); + VrfSignature { signature: Signature(sign_bytes), vrf_outputs: outputs } + } + + /// Generate output bytes from the given VRF input. + /// + /// Index is relative to one of the `VrfInput` messages used during construction. + pub fn make_bytes( + &self, + context: &'static [u8], + input: &VrfInput, + ) -> [u8; N] { + let transcript = Transcript::new_labeled(context); + let inout = self.0.clone().0.vrf_inout(input.0.clone()); + inout.vrf_output_bytes(transcript) + } + } + + impl Public { + fn vrf_verify_gen( + &self, + data: &VrfSignData, + signature: &VrfSignature, + ) -> bool { + let Ok(public) = PublicKey::deserialize_compressed(self.as_ref()) else { + return false + }; + + let Ok(preouts) = signature + .vrf_outputs + .iter() + .map(|o| o.0.clone()) + .collect::>() + .into_inner() else { + return false + }; + + // Deserialize only the proof, the rest has already been deserialized + // This is another hack used because backend signature type is generic over the number + // of ios. @burdges can we provide a vec or boxed version? 
+ let Ok(signature) = ThinVrfSignature::<0>::deserialize_compressed(signature.signature.as_ref()).map(|s| s.signature) else { + return false + }; + let signature = ThinVrfSignature { signature, preoutputs: preouts }; + + let inputs = data.vrf_inputs.iter().map(|i| i.0.clone()); + + signature.verify_thin_vrf(data.transcript.clone(), inputs, &public).is_ok() + } + } + + impl VrfOutput { + /// Generate output bytes for the given VRF input. + pub fn make_bytes( + &self, + context: &'static [u8], + input: &VrfInput, + ) -> [u8; N] { + let transcript = Transcript::new_labeled(context); + let inout = + bandersnatch_vrfs::VrfInOut { input: input.0.clone(), preoutput: self.0.clone() }; + inout.vrf_output_bytes(transcript) + } + } +} + +#[cfg(test)] +mod tests { + use super::{vrf::*, *}; + use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; + const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0; SEED_SERIALIZED_LEN]; + + #[allow(unused)] + fn b2h(bytes: &[u8]) -> String { + array_bytes::bytes2hex("", bytes) + } + + fn h2b(hex: &str) -> Vec { + array_bytes::hex2bytes_unchecked(hex) + } + + #[test] + fn backend_assumptions_check() { + let pair = SecretKey::from_seed(DEV_SEED); + let public = pair.to_public(); + + assert_eq!(public.0.size_of_serialized(), PUBLIC_SERIALIZED_LEN); + } + + #[test] + fn derive_hard_known_pair() { + let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); + // known address of DEV_PHRASE with 1.1 + let known = h2b("b0d3648bd5a3542afa16c06fee04cba37cc55c83a8894d36d87897bda0c65eec"); + assert_eq!(pair.public().as_ref(), known); + } + + #[test] + fn verify_known_signature() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + let signature_raw = + h2b("524b0cbc4eb9579e2cd115fe55e2625e8265b3ea599ac903e67b08c2c669780cf43ca9c1e0a8a63c1dba121a606f95d3466cfe1880acc502c2792775125a7fcc" + ); + let signature = Signature::from_slice(&signature_raw).unwrap(); + + assert!(Pair::verify(&signature, b"hello", &public)); + 
} + + #[test] + fn sign_verify() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + let msg = b"hello"; + + let signature = pair.sign(msg); + assert!(Pair::verify(&signature, msg, &public)); + } + + #[test] + fn vrf_sign_verify() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + let i3 = VrfInput::new(b"in3", &[(b"domy", b"yay"), (b"domz", b"nay")]); + + let data = VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1, i2, i3]).unwrap(); + + let signature = pair.vrf_sign(&data); + + assert!(public.vrf_verify(&data, &signature)); + } + + #[test] + fn vrf_sign_verify_bad_inputs() { + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + + let data = + VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]).unwrap(); + + let signature = pair.vrf_sign(&data); + + let data = VrfSignData::from_iter(b"mydata", &[b"data"], [i1, i2.clone()]).unwrap(); + assert!(!public.vrf_verify(&data, &signature)); + + let data = VrfSignData::from_iter(b"mydata", &[b"tdata"], [i2]).unwrap(); + assert!(!public.vrf_verify(&data, &signature)); + } + + #[test] + fn vrf_make_bytes_matches() { + let pair = Pair::from_seed(DEV_SEED); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + let data = + VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]).unwrap(); + let signature = pair.vrf_sign(&data); + + let o10 = pair.make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.vrf_outputs[0].make_bytes::<32>(b"ctx1", &i1); + assert_eq!(o10, o11); + + let o20 = pair.make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.vrf_outputs[1].make_bytes::<48>(b"ctx2", 
&i2); + assert_eq!(o20, o21); + } + + #[test] + fn encode_decode_vrf_signature() { + // Transcript data is hashed together and signed. + // It doesn't contribute to serialized length. + let pair = Pair::from_seed(DEV_SEED); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + let data = + VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]).unwrap(); + let expected = pair.vrf_sign(&data); + + let bytes = expected.encode(); + + let decoded = VrfSignature::decode(&mut &bytes[..]).unwrap(); + assert_eq!(expected, decoded); + + let data = VrfSignData::from_iter(b"mydata", &[b"tdata"], []).unwrap(); + let expected = pair.vrf_sign(&data); + + let bytes = expected.encode(); + + let decoded = VrfSignature::decode(&mut &bytes[..]).unwrap(); + assert_eq!(expected, decoded); + } +} diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 04a71f93ee168..1f66965d785fc 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -15,9 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// tag::description[] //! Cryptographic utilities. -// end::description[] use crate::{ed25519, sr25519}; #[cfg(feature = "std")] @@ -480,7 +478,7 @@ pub trait ByteArray: AsRef<[u8]> + AsMut<[u8]> + for<'a> TryFrom<&'a [u8], Error } /// Trait suitable for typical cryptographic key public type. -pub trait Public: ByteArray + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync {} +pub trait Public: ByteArray + Derive + CryptoType + PartialEq + Eq + Clone + Send {} /// An opaque 32-byte cryptographic identifier. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -828,7 +826,7 @@ impl sp_std::str::FromStr for SecretUri { /// /// For now it just specifies how to create a key from a phrase and derivation path. 
#[cfg(feature = "full_crypto")] -pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { +pub trait Pair: CryptoType + Sized + Clone + Send + 'static { /// The type which is used to encode a public key. type Public: Public + Hash; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index f9541b02e2903..d00bc4dd70d5f 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -55,6 +55,7 @@ pub mod crypto; pub mod hexdisplay; pub use paste; +pub mod bandersnatch; #[cfg(feature = "bls-experimental")] pub mod bls; pub mod defer; diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index 6faf4ffa3042a..a1889d6779af6 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -21,10 +21,12 @@ use crate::crypto::KeyTypeId; /// Key type for generic Ed25519 key. pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); -/// Key type for generic Sr 25519 key. +/// Key type for generic Sr25519 key. pub const SR25519: KeyTypeId = KeyTypeId(*b"sr25"); /// Key type for generic ECDSA key. pub const ECDSA: KeyTypeId = KeyTypeId(*b"ecds"); +/// Key type for generic Bandersnatch key. +pub const BANDERSNATCH: KeyTypeId = KeyTypeId(*b"bb12"); /// Key type for generic BLS12-377 key. pub const BLS377: KeyTypeId = KeyTypeId(*b"bls7"); /// Key type for generic BLS12-381 key. 
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 750b5d5924637..050effbb1a003 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -45,6 +45,7 @@ use sp_core::{ use sp_keystore::KeystoreExt; use sp_core::{ + bandersnatch, crypto::KeyTypeId, ecdsa, ed25519, offchain::{ @@ -1140,6 +1141,19 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadSignature)?; Ok(pubkey.serialize()) } + + /// DAVXY + fn bandersnatch_generate( + &mut self, + id: KeyTypeId, + seed: Option>, + ) -> bandersnatch::Public { + let seed = seed.as_ref().map(|s| std::str::from_utf8(s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .bandersnatch_generate_new(id, seed) + .expect("`bandernatch_generate` failed") + } } /// Interface that provides functions for hashing with different algorithms. diff --git a/primitives/keyring/src/bandersnatch.rs b/primitives/keyring/src/bandersnatch.rs new file mode 100644 index 0000000000000..a61e9dafef877 --- /dev/null +++ b/primitives/keyring/src/bandersnatch.rs @@ -0,0 +1,241 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Support code for the runtime. A set of test accounts. 
+ +pub use sp_core::bandersnatch; +use sp_core::{ + bandersnatch::{Pair, Public, Signature}, + crypto::UncheckedFrom, + ByteArray, Pair as PairT, H256, +}; +use sp_runtime::AccountId32; + +use lazy_static::lazy_static; +use std::{collections::HashMap, ops::Deref, sync::Mutex}; + +/// Set of test accounts. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] +pub enum Keyring { + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, +} + +impl Keyring { + pub fn from_public(who: &Public) -> Option { + Self::iter().find(|&k| &Public::from(k) == who) + } + + pub fn from_account_id(who: &AccountId32) -> Option { + Self::iter().find(|&k| &k.to_account_id() == who) + } + + pub fn from_raw_public(who: [u8; 32]) -> Option { + Self::from_public(&Public::unchecked_from(who)) + } + + pub fn to_raw_public(self) -> [u8; 32] { + *Public::from(self).as_ref() + } + + pub fn from_h256_public(who: H256) -> Option { + Self::from_public(&Public::unchecked_from(who.into())) + } + + pub fn to_h256_public(self) -> H256 { + AsRef::<[u8; 32]>::as_ref(&Public::from(self)).into() + } + + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } + + pub fn to_account_id(self) -> AccountId32 { + self.to_raw_public().into() + } + + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } + + /// Returns an iterator over all test accounts. + pub fn iter() -> impl Iterator { + ::iter() + } + + pub fn public(self) -> Public { + self.pair().public() + } + + pub fn to_seed(self) -> String { + format!("//{}", self) + } + + /// Create a crypto `Pair` from a numeric value. + pub fn numeric(idx: usize) -> Pair { + Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed") + } + + /// Get account id of a `numeric` account. 
+ pub fn numeric_id(idx: usize) -> AccountId32 { + (*AsRef::<[u8; 32]>::as_ref(&Self::numeric(idx).public())).into() + } +} + +impl From for &'static str { + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } +} + +#[derive(Debug)] +pub struct ParseKeyringError; + +impl std::fmt::Display for ParseKeyringError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ParseKeyringError") + } +} + +impl std::str::FromStr for Keyring { + type Err = ParseKeyringError; + + fn from_str(s: &str) -> Result::Err> { + match s { + "Alice" => Ok(Keyring::Alice), + "Bob" => Ok(Keyring::Bob), + "Charlie" => Ok(Keyring::Charlie), + "Dave" => Ok(Keyring::Dave), + "Eve" => Ok(Keyring::Eve), + "Ferdie" => Ok(Keyring::Ferdie), + "One" => Ok(Keyring::One), + "Two" => Ok(Keyring::Two), + _ => Err(ParseKeyringError), + } + } +} + +lazy_static! 
{ + static ref PRIVATE_KEYS: Mutex> = + Mutex::new(Keyring::iter().map(|who| (who, who.pair())).collect()); + static ref PUBLIC_KEYS: HashMap = PRIVATE_KEYS + .lock() + .unwrap() + .iter() + .map(|(&who, pair)| (who, pair.public())) + .collect(); +} + +impl From for AccountId32 { + fn from(k: Keyring) -> Self { + k.to_account_id() + } +} + +impl From for Public { + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap() + } +} + +impl From for Pair { + fn from(k: Keyring) -> Self { + k.pair() + } +} + +impl From for [u8; 32] { + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap().as_ref() + } +} + +impl From for H256 { + fn from(k: Keyring) -> Self { + AsRef::<[u8; 32]>::as_ref(PUBLIC_KEYS.get(&k).unwrap()).into() + } +} + +impl From for &'static [u8; 32] { + fn from(k: Keyring) -> Self { + PUBLIC_KEYS.get(&k).unwrap().as_ref() + } +} + +impl AsRef<[u8; 32]> for Keyring { + fn as_ref(&self) -> &[u8; 32] { + PUBLIC_KEYS.get(self).unwrap().as_ref() + } +} + +impl AsRef for Keyring { + fn as_ref(&self) -> &Public { + PUBLIC_KEYS.get(self).unwrap() + } +} + +impl Deref for Keyring { + type Target = [u8; 32]; + fn deref(&self) -> &[u8; 32] { + PUBLIC_KEYS.get(self).unwrap().as_ref() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{bandersnatch::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); + } +} diff --git a/primitives/keyring/src/lib.rs b/primitives/keyring/src/lib.rs index 7432aff12544a..fba15a121e818 100644 --- a/primitives/keyring/src/lib.rs +++ b/primitives/keyring/src/lib.rs @@ -23,11 +23,15 @@ pub mod sr25519; /// Test account crypto for ed25519. 
pub mod ed25519; +/// Test account crypto for bandersnatch. +pub mod bandersnatch; + /// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, /// since it tends to be used for accounts (although it may also be used /// by authorities). pub use sr25519::Keyring as AccountKeyring; +pub use bandersnatch::Keyring as BandersnatchKeyring; pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 1d2a27cb8726c..d67f2cb2d5c69 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -19,12 +19,13 @@ pub mod testing; -#[cfg(feature = "bls-experimental")] -use sp_core::{bls377, bls381}; use sp_core::{ + bandersnatch, crypto::{ByteArray, CryptoTypeId, KeyTypeId}, ecdsa, ed25519, sr25519, }; +#[cfg(feature = "bls-experimental")] +use sp_core::{bls377, bls381}; use std::sync::Arc; @@ -174,6 +175,40 @@ pub trait Keystore: Send + Sync { msg: &[u8; 32], ) -> Result, Error>; + /// DAVXY TODO + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec; + + /// DAVXY TODO + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result; + + /// DAVXY TODO + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> Result, Error>; + + /// DAVXY TODO + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + ) -> Result, Error>; + + /// DAVXY TODO + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> Result, Error>; + #[cfg(feature = "bls-experimental")] /// Returns all bls12-381 public keys for the given key type. 
fn bls381_public_keys(&self, id: KeyTypeId) -> Vec; @@ -258,6 +293,7 @@ pub trait Keystore: Send + Sync { /// - sr25519 /// - ed25519 /// - ecdsa + /// - bandersnatch /// - bls381 /// - bls377 /// @@ -291,6 +327,11 @@ pub trait Keystore: Send + Sync { self.ecdsa_sign(id, &public, msg)?.map(|s| s.encode()) }, + bandersnatch::CRYPTO_ID => { + let public = bandersnatch::Public::from_slice(public) + .map_err(|_| Error::ValidationError("Invalid public key format".into()))?; + self.bandersnatch_sign(id, &public, msg)?.map(|s| s.encode()) + }, #[cfg(feature = "bls-experimental")] bls381::CRYPTO_ID => { let public = bls381::Public::from_slice(public) diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index e18931a7af883..b9c685397fb6f 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -19,12 +19,13 @@ use crate::{Error, Keystore, KeystorePtr}; -#[cfg(feature = "bls-experimental")] -use sp_core::{bls377, bls381}; use sp_core::{ + bandersnatch, crypto::{ByteArray, KeyTypeId, Pair, VrfSecret}, ecdsa, ed25519, sr25519, }; +#[cfg(feature = "bls-experimental")] +use sp_core::{bls377, bls381}; use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; @@ -214,6 +215,45 @@ impl Keystore for MemoryKeystore { Ok(sig) } + fn bandersnatch_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.public_keys::(key_type) + } + + fn bandersnatch_generate_new( + &self, + key_type: KeyTypeId, + seed: Option<&str>, + ) -> Result { + self.generate_new::(key_type, seed) + } + + fn bandersnatch_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + msg: &[u8], + ) -> Result, Error> { + self.sign::(key_type, public, msg) + } + + fn bandersnatch_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + ) -> Result, Error> { + self.vrf_sign::(key_type, public, data) + } + + fn bandersnatch_vrf_output( + &self, + key_type: KeyTypeId, + 
public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfInput, + ) -> Result, Error> { + self.vrf_output::(key_type, public, input) + } + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 8b1dee38726ad..f8e3778953759 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,7 +18,7 @@ sp-application-crypto = { version = "7.0.0", default-features = false, path = ". sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } -sp-consensus-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } @@ -36,7 +36,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } -pallet-sassafras = { version = "0.3.1-dev", default-features = false, path = "../../frame/sassafras" } +pallet-sassafras = { version = "0.3.2-dev", 
default-features = false, path = "../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../frame/balances" } pallet-root-testing = { version = "1.0.0-dev", default-features = false, path = "../../frame/root-testing" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../frame/sudo" } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 5e730800dc028..0323ab6053403 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -23,11 +23,11 @@ use super::{ use codec::Encode; use sc_service::construct_genesis_block; use sp_core::{ - sr25519, + bandersnatch, sr25519, storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::{AccountKeyring, Sr25519Keyring}; +use sp_keyring::AccountKeyring; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, Header as HeaderT}, BuildStorage, @@ -54,9 +54,9 @@ impl Default for GenesisStorageBuilder { fn default() -> Self { Self::new( vec![ - Sr25519Keyring::Alice.into(), - Sr25519Keyring::Bob.into(), - Sr25519Keyring::Charlie.into(), + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), ], (0..16_usize) .into_iter() @@ -109,11 +109,18 @@ impl GenesisStorageBuilder { /// Builds the `GenesisConfig` and returns its storage. 
pub fn build(self) -> Storage { - let authorities_sr25519: Vec<_> = self + let authorities_sr25519: Vec = + self.authorities.clone().into_iter().map(|id| id.into()).collect(); + + let authorities_bandersnatch: Vec = self .authorities - .clone() - .into_iter() - .map(|id| sr25519::Public::from(id)) + .iter() + .map(|id| { + use sp_keyring::bandersnatch::Keyring as BandersnatchKeyring; + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + BandersnatchKeyring::from_str(&seed).unwrap().into() + }) .collect(); let genesis_config = GenesisConfig { @@ -129,11 +136,7 @@ impl GenesisStorageBuilder { epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), }, sassafras: pallet_sassafras::GenesisConfig { - authorities: authorities_sr25519 - .clone() - .into_iter() - .map(|x| (x.into(), 1)) - .collect(), + authorities: authorities_bandersnatch.into_iter().map(|x| (x.into(), 1)).collect(), epoch_config: sp_consensus_sassafras::SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32, diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 9963acde47d0b..b1ee221a99a59 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -135,20 +135,6 @@ pub struct TransferData { pub amount: Balance, pub nonce: Index, } -// TODO DAVXY -// impl From> for Extrinsic { -// fn from(call: pallet_sassafras::Call) -> Self { -// use pallet_sassafras::Call; -// match call { -// Call::submit_tickets { tickets: _ } => Extrinsic::Sassafras, -// Call::plan_config_change { config: _ } => Extrinsic::Sassafras, -// Call::report_equivocation_unsigned { equivocation_proof: _ } => Extrinsic::Sassafras, -// _ => panic!( -// "Unexpected Sassafras call type: {:?}, unable to converto to Extrinsic", -// call -// ), -// } -// } /// The address format for describing accounts. pub type Address = sp_core::sr25519::Public; @@ -715,8 +701,9 @@ impl_runtime_apis! 
{ Sassafras::slot_ticket_id(slot) } - fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, - sp_consensus_sassafras::TicketData)> { + fn slot_ticket( + slot: sp_consensus_sassafras::Slot + ) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketData)> { Sassafras::slot_ticket(slot) } @@ -921,6 +908,10 @@ pub mod storage_key_generator { vec![b"Babe", b"NextAuthorities"], vec![b"Babe", b"SegmentIndex"], vec![b"Babe", b":__STORAGE_VERSION__:"], + vec![b"Sassafras", b":__STORAGE_VERSION__:"], + vec![b"Sassafras", b"EpochConfig"], + vec![b"Sassafras", b"Authorities"], + vec![b"Sassafras", b"NextAuthorities"], vec![b"Balances", b":__STORAGE_VERSION__:"], vec![b"Balances", b"TotalIssuance"], vec![b"SubstrateTest", b"Authorities"], @@ -975,31 +966,30 @@ pub mod storage_key_generator { /// origin. pub fn get_expected_storage_hashed_keys() -> Vec { [ - //System|:__STORAGE_VERSION__: + // System|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", - //SubstrateTest|Authorities + // SubstrateTest|Authorities "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|:__STORAGE_VERSION__: + // Babe|:__STORAGE_VERSION__: "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", - //Babe|Authorities + // Babe|Authorities "1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|SegmentIndex + // Babe|SegmentIndex "1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4", - //Babe|NextAuthorities + // Babe|NextAuthorities "1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c", - //Babe|EpochConfig + // Babe|EpochConfig "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", - //System|:__STORAGE_VERSION__: + // System|:__STORAGE_VERSION__: "26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429", - //System|UpgradedToU32RefCount + // System|UpgradedToU32RefCount 
"26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710", - //System|ParentHash + // System|ParentHash "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", - //System::BlockHash|0 + // System::BlockHash|0 "26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000", - //System|UpgradedToTripleRefCount + // System|UpgradedToTripleRefCount "26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439", - // System|Account|blake2_128Concat("//11") "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da901cae4e3edfbb32c91ed3f01ab964f4eeeab50338d8e5176d3141802d7b010a55dadcd5f23cf8aaafa724627e967e90e", // System|Account|blake2_128Concat("//4") @@ -1046,6 +1036,14 @@ pub mod storage_key_generator { "3a65787472696e7369635f696e646578", // :heappages "3a686561707061676573", + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", // Balances|:__STORAGE_VERSION__: "c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429", // Balances|TotalIssuance @@ -1078,6 +1076,32 @@ mod tests { prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, }; + fn babe_pre_digest() -> DigestItem { + use sp_consensus_babe::digests::{ + CompatibleDigestItem, PreDigest, SecondaryPlainPreDigest, + }; + DigestItem::babe_pre_digest(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + authority_index: 0, + slot: 0.into(), + })) + } + + fn sassafras_pre_digest() -> DigestItem { + use sp_consensus_sassafras::{ + digests::{CompatibleDigestItem, PreDigest}, + slot_claim_sign_data, AuthorityPair, + }; + use sp_core::crypto::{Pair, VrfSecret}; + let data = 
slot_claim_sign_data(&Default::default(), 0.into(), 0); + let vrf_signature = AuthorityPair::from_seed(&[0u8; 32]).as_ref().vrf_sign(&data); + DigestItem::sassafras_pre_digest(PreDigest { + authority_idx: 0, + slot: 0.into(), + vrf_signature, + ticket_claim: None, + }) + } + #[test] fn heap_pages_is_respected() { // This tests that the on-chain `HEAP_PAGES` parameter is respected. @@ -1102,7 +1126,8 @@ mod tests { // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to // ~2048k of heap memory. let (new_at_hash, block) = { - let mut builder = client.new_block(Default::default()).unwrap(); + let digest = Digest { logs: vec![babe_pre_digest(), sassafras_pre_digest()] }; + let mut builder = client.new_block(digest).unwrap(); builder.push_storage_change(HEAP_PAGES.to_vec(), Some(32u64.encode())).unwrap(); let block = builder.build().unwrap().block; let hash = block.header.hash(); From f7bb072b767c6b87be44973b19988e9b17e179bc Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 19 May 2023 10:25:34 +0200 Subject: [PATCH 32/62] rustfmt --- client/consensus/sassafras/src/authorship.rs | 15 +++++------ client/consensus/sassafras/src/tests.rs | 25 +++++++++++++------ .../consensus/sassafras/src/verification.rs | 11 +++++--- primitives/core/src/bandersnatch.rs | 10 ++------ 4 files changed, 33 insertions(+), 28 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 17da58e99f2f5..e922f17b70d3f 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -24,7 +24,7 @@ use sp_consensus_sassafras::{ digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketClaim, TicketData, TicketEnvelope, TicketId, }; -use sp_core::{twox_64, ByteArray, ed25519}; +use sp_core::{ed25519, twox_64, ByteArray}; use std::pin::Pin; @@ -231,14 +231,11 @@ where 
self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; let mut epoch_changes = self.epoch_changes.shared_data_locked(); - let mut epoch = epoch_changes.viable_epoch_mut(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot))?; - - let claim = authorship::claim_slot( - slot, - &mut epoch.as_mut(), - maybe_ticket, - &self.keystore, - ); + let mut epoch = epoch_changes.viable_epoch_mut(epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + })?; + + let claim = authorship::claim_slot(slot, &mut epoch.as_mut(), maybe_ticket, &self.keystore); if claim.is_some() { debug!(target: LOG_TARGET, "Claimed slot {}", slot); } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 1438962d960f7..65363388e6405 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -433,8 +433,12 @@ fn claim_primary_slots_works() { // Fail if we have authority key in our keystore but not ticket aux data // ticket-aux: KO , authority-key: OK => FAIL - let claim = - authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data.clone())), &keystore); + let claim = authorship::claim_slot( + 0.into(), + &mut epoch, + Some((ticket_id, ticket_data.clone())), + &keystore, + ); assert!(claim.is_none()); assert!(epoch.tickets_aux.is_empty()); @@ -442,11 +446,17 @@ fn claim_primary_slots_works() { // Success if we have ticket aux data and the authority key in our keystore // ticket-aux: OK , authority-key: OK => SUCCESS - epoch.tickets_aux.insert(ticket_id, (alice_authority_idx, ticket_secret.clone())); + epoch + .tickets_aux + .insert(ticket_id, (alice_authority_idx, ticket_secret.clone())); - let (pre_digest, auth_id) = - authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data.clone())), &keystore) - .unwrap(); + let (pre_digest, auth_id) = authorship::claim_slot( + 0.into(), + &mut epoch, + Some((ticket_id, ticket_data.clone())), + 
&keystore, + ) + .unwrap(); assert!(epoch.tickets_aux.is_empty()); assert_eq!(pre_digest.authority_idx, alice_authority_idx); @@ -457,7 +467,8 @@ fn claim_primary_slots_works() { epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret)); - let claim = authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data)), &keystore); + let claim = + authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data)), &keystore); assert!(claim.is_none()); assert!(epoch.tickets_aux.is_empty()); } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 5a5dfc457ef80..f149f9758c032 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -94,7 +94,8 @@ fn check_header( // Optionally check ticket ownership - let mut vrf_sign_data = slot_claim_sign_data(&config.randomness, pre_digest.slot, epoch.epoch_idx); + let mut vrf_sign_data = + slot_claim_sign_data(&config.randomness, pre_digest.slot, epoch.epoch_idx); match (&maybe_ticket, &pre_digest.ticket_claim) { (Some((_ticket_id, ticket_data)), Some(ticket_claim)) => { @@ -104,8 +105,7 @@ fn check_header( let challenge = vrf_sign_data.challenge::<32>(); let erased_public = ed25519::Public::from_raw(ticket_data.erased_public); - let erased_signature = - ed25519::Signature::from_raw(ticket_claim.erased_signature); + let erased_signature = ed25519::Signature::from_raw(ticket_claim.erased_signature); if !ed25519::Pair::verify(&erased_signature, &challenge, &erased_public) { return Err(sassafras_err(Error::BadSignature(pre_hash))) @@ -132,7 +132,10 @@ fn check_header( // Check per-slot vrf proof - if !authority_id.as_inner_ref().vrf_verify(&vrf_sign_data, &pre_digest.vrf_signature) { + if !authority_id + .as_inner_ref() + .vrf_verify(&vrf_sign_data, &pre_digest.vrf_signature) + { return Err(sassafras_err(Error::VrfVerificationFailed)) } diff --git 
a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index f0af2ae705911..32a15ffc2f760 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -382,19 +382,13 @@ pub mod vrf { } /// Appends a message to the transcript - pub fn push_transcript_data( - &mut self, - data: &[u8], - ) { + pub fn push_transcript_data(&mut self, data: &[u8]) { self.transcript.append_slice(data); } /// Appends a `VrfInput` to the vrf inputs to be signed. /// On failure, returns the `VrfInput`. - pub fn push_vrf_input( - &mut self, - vrf_input: VrfInput, - ) -> Result<(), VrfInput> { + pub fn push_vrf_input(&mut self, vrf_input: VrfInput) -> Result<(), VrfInput> { self.vrf_inputs.try_push(vrf_input) } From fb4a84176e14c2dff2828d0e5d582abc482ae9d9 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 19 May 2023 17:34:35 +0200 Subject: [PATCH 33/62] Fix bench releated stuff --- bin/node-sassafras/runtime/Cargo.toml | 2 +- bin/node-sassafras/runtime/src/lib.rs | 14 ++------- frame/sassafras/src/benchmarking.rs | 43 ++++++++++++++------------- frame/sassafras/src/mock.rs | 7 +++-- 4 files changed, 30 insertions(+), 36 deletions(-) diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index a2b99a096956c..1702e48ab6cf7 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -81,7 +81,7 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", - "frame-system-benchmarking", + "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 0f045fe0879c9..cc9f3aa39e9bb 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -549,18 +549,8 @@ impl_runtime_apis! 
{ impl frame_system_benchmarking::Config for Runtime {} impl baseline::Config for Runtime {} - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs index 2f1818e5b52cd..fdf3f742bfb57 100644 --- a/frame/sassafras/src/benchmarking.rs +++ b/frame/sassafras/src/benchmarking.rs @@ -18,37 +18,40 @@ //! Benchmarks for the Sassafras pallet. use super::*; + use frame_benchmarking::benchmarks; use frame_system::RawOrigin; -use sp_io::hashing; - -fn make_dummy_ticket(i: usize) -> Ticket { - let buf = i.to_le_bytes(); - hashing::twox_256(&buf).try_into().unwrap() +use sp_consensus_sassafras::VrfOutput; + +// Makes a dummy ticket envelope. +// The resulting ticket-id is not very important and is expected to be below the +// configured threshold (which is guaranteed because we are using mock::TEST_EPOCH_CONFIGURATION). 
+fn make_dummy_ticket(attempt_idx: u32) -> TicketEnvelope { + let mut output_enc: &[u8] = &[ + 0x0c, 0x1a, 0x83, 0x5e, 0x56, 0x9b, 0x18, 0xa0, 0xd9, 0x13, 0x39, 0x7e, 0xb9, 0x5a, 0x39, + 0x83, 0xf3, 0xc5, 0x73, 0xf6, 0xb1, 0x35, 0xa6, 0x48, 0xa3, 0x83, 0xac, 0x3b, 0xb8, 0x43, + 0xa7, 0x3d, + ]; + let output = VrfOutput::decode(&mut output_enc).unwrap(); + let data = TicketData { + attempt_idx, + erased_public: Default::default(), + revealed_public: Default::default(), + }; + TicketEnvelope { data, vrf_preout: output, ring_proof: () } } benchmarks! { submit_tickets { - let x in 0 .. 100; - - // Almost fill the available tickets space. - - let max_tickets: u32 = ::MaxTickets::get() - 10; - let tickets: Vec = (0..max_tickets as usize).into_iter().map(|i| { - make_dummy_ticket(i) - }).collect(); - let _ = Pallet::::submit_tickets(RawOrigin::None.into(), tickets); - - // Create the tickets to submit during the benchmark + let x in 0 .. ::MaxTickets::get(); - let tickets: Vec = (0..x as usize).into_iter().map(|i| { - make_dummy_ticket(i + max_tickets as usize) - }).collect(); + let tickets: BoundedVec::MaxTickets> = + (0..x).map(make_dummy_ticket).collect::>().try_into().unwrap(); }: _(RawOrigin::None, tickets) impl_benchmark_test_suite!( Pallet, - crate::mock::new_test_ext(3), + crate::mock::new_test_ext(1), crate::mock::Test, ) } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 5dde8abc11abd..fa3dfcc83593d 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -106,9 +106,10 @@ frame_support::construct_runtime!( } ); -// Default used under tests. -// The max redundancy factor allows to accept all submitted tickets without worrying -// about the threshold. +// Default used for most of the tests and benchmarks. +// +// The redundancy factor has been set to max value to accept all submitted +// tickets without worrying about the threshold. 
pub const TEST_EPOCH_CONFIGURATION: SassafrasEpochConfiguration = SassafrasEpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 32 }; From f8003bdbf62e12d0e6b009e8a38437bc6ac1e9c0 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 12 Jun 2023 17:01:49 +0200 Subject: [PATCH 34/62] Bump ring-vrf related crates versions --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a3dddeb9ffc2..6cefcf3e9399d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -399,7 +399,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +source = "git+https://github.com/w3f/ring-vrf#9dfb20539d15ca754c2d66916e356a95e5eba68e" dependencies = [ "ark-ec", "ark-ff", @@ -447,7 +447,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +source = "git+https://github.com/w3f/ring-vrf#9dfb20539d15ca754c2d66916e356a95e5eba68e" dependencies = [ "ark-ff", "ark-serialize", @@ -713,7 +713,7 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" version = "0.0.1" -source = "git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +source = "git+https://github.com/w3f/ring-vrf#9dfb20539d15ca754c2d66916e356a95e5eba68e" dependencies = [ "ark-bls12-381", "ark-ec", @@ -1361,7 +1361,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#1e42bb632263f4dff86b400ec9a13af21db72360" +source = "git+https://github.com/w3f/ring-proof#16fe0cd54ef92134528e8c1418a77ba493746274" dependencies = [ "ark-ec", "ark-ff", @@ -2109,7 +2109,7 @@ checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" [[package]] name = "dleq_vrf" version = "0.0.2" -source = 
"git+https://github.com/w3f/ring-vrf#4957177a717c7555c8df2869012201017b62e66b" +source = "git+https://github.com/w3f/ring-vrf#9dfb20539d15ca754c2d66916e356a95e5eba68e" dependencies = [ "ark-ec", "ark-ff", @@ -2455,7 +2455,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#f60bc946e2a4340b1c2d00d30c654e82a5887983" +source = "git+https://github.com/w3f/fflonk#afe712a7d2edf1aa7a2e34a416e3f513f97a2cf9" dependencies = [ "ark-ec", "ark-ff", @@ -8644,7 +8644,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#1e42bb632263f4dff86b400ec9a13af21db72360" +source = "git+https://github.com/w3f/ring-proof#16fe0cd54ef92134528e8c1418a77ba493746274" dependencies = [ "ark-ec", "ark-ff", @@ -12904,7 +12904,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.6", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] From afe252e935427de99c5ea85e658fcdd4908dd7ee Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 12 Jun 2023 20:08:43 +0200 Subject: [PATCH 35/62] Fix after master merge --- Cargo.lock | 310 ++++++++++++++++++++-- Cargo.toml | 11 +- bin/node-sassafras/node/Cargo.toml | 6 +- bin/node-sassafras/runtime/Cargo.toml | 8 +- bin/node-sassafras/runtime/src/lib.rs | 2 +- client/consensus/sassafras/Cargo.toml | 10 +- frame/sassafras/Cargo.toml | 15 +- frame/sassafras/src/lib.rs | 2 +- primitives/consensus/sassafras/Cargo.toml | 30 ++- primitives/consensus/sassafras/src/lib.rs | 4 +- primitives/core/src/bandersnatch.rs | 2 +- primitives/crypto/ec-utils/Cargo.toml | 2 +- 12 files changed, 345 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dcc47433460ee..ee1f286d7b6ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -478,6 +478,20 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "ark-secret-scalar" +version = "0.0.2" +dependencies = [ + "ark-ec", + 
"ark-ff", + "ark-serialize", + "ark-std", + "ark-transcript", + "digest 0.10.7", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "ark-serialize" version = "0.4.2" @@ -511,6 +525,18 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-transcript" +version = "0.0.2" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.7", + "rand_core 0.6.4", + "sha3", +] + [[package]] name = "array-bytes" version = "4.2.0" @@ -764,6 +790,26 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.6", + "zeroize", +] + [[package]] name = "base-x" version = "0.2.11" @@ -1392,6 +1438,19 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "common-path" version = "1.0.0" @@ -2131,6 +2190,21 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" +[[package]] +name = "dleq_vrf" +version = "0.0.2" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.2", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "doc-comment" version = "0.3.3" @@ -2488,6 +2562,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "fflonk" +version = "0.1.0" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -4542,7 +4628,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen 0.10.0", - "ring", + 
"ring 0.16.20", "rustls 0.20.8", "thiserror", "webpki 0.22.0", @@ -5001,6 +5087,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5556,6 +5654,84 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.2-dev" +dependencies = [ + "clap 4.3.2", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.2-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + 
"sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -7203,6 +7379,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.2-dev" +dependencies = [ + "array-bytes 4.2.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-application-crypto", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -8304,7 +8501,7 @@ checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", "rustls 0.20.8", "slab", @@ -8454,7 +8651,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "x509-parser 0.13.2", "yasna", @@ -8467,7 +8664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time 0.3.21", "yasna", ] @@ -8596,6 +8793,20 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.1.0" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -8763,7 +8974,7 @@ checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64 0.13.1", "log", - "ring", + "ring 0.16.20", "sct 0.6.1", "webpki 0.21.4", ] @@ -8775,7 +8986,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct 0.7.0", "webpki 0.22.0", ] @@ -9419,6 +9630,43 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.2-dev" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -10334,7 +10582,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -10360,7 +10608,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10370,7 +10618,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10708,7 +10956,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version 0.4.0", "sha2 0.10.6", "subtle", @@ -11085,6 +11333,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.3.2-dev" +dependencies = [ + "async-trait", + "merlin 
2.0.1", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -11101,6 +11369,8 @@ name = "sp-core" version = "21.0.0" dependencies = [ "array-bytes 4.2.0", + "arrayvec 0.7.2", + "bandersnatch_vrfs", "bitflags", "blake2", "bounded-collections", @@ -11116,7 +11386,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -11838,7 +12108,7 @@ dependencies = [ "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -12025,6 +12295,7 @@ dependencies = [ "pallet-balances", "pallet-beefy-mmr", "pallet-root-testing", + "pallet-sassafras", "pallet-sudo", "pallet-timestamp", "parity-scale-codec", @@ -12041,6 +12312,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-debug-derive", "sp-externalities", @@ -12872,7 +13144,7 @@ dependencies = [ "log", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -12887,7 +13159,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] @@ -13551,7 +13823,7 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13561,7 +13833,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -13590,7 +13862,7 @@ dependencies = [ 
"rand 0.8.5", "rcgen 0.9.3", "regex", - "ring", + "ring 0.16.20", "rtcp", "rtp", "rustls 0.19.1", @@ -13655,7 +13927,7 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "rcgen 0.9.3", - "ring", + "ring 0.16.20", "rustls 0.19.1", "sec1 0.3.0", "serde", @@ -14102,7 +14374,7 @@ dependencies = [ "lazy_static", "nom", "oid-registry 0.4.0", - "ring", + "ring 0.16.20", "rusticata-macros", "thiserror", "time 0.3.21", diff --git a/Cargo.toml b/Cargo.toml index 61207534f7b18..240b540b39cf5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -343,5 +343,12 @@ lto = "fat" # https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units codegen-units = 1 -#[patch."https://github.com/w3f/ring-vrf"] -# bandersnatch_vrfs = { path = "/mnt/ssd/users/develop/w3f/ring-vrf/bandersnatch_vrfs" } +[patch."https://github.com/w3f/ring-vrf"] +bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } + +[patch."https://github.com/w3f/fflonk"] +fflonk = { path = "/mnt/ssd/develop/w3f/fflonk" } + +[patch."https://github.com/w3f/ring-proof"] +common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } +ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 00bd11461ad97..db6c93652dc5c 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -19,7 +19,7 @@ name = "node-sassafras" clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-service = { version = "0.10.0-dev", path = "../../../client/service" } @@ -34,10 +34,10 @@ sc-consensus = { version = "0.10.0-dev", path = 
"../../../client/consensus/commo sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 1702e48ab6cf7..e3298256a2388 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -29,14 +29,14 @@ frame-executive = { version = "4.0.0-dev", default-features = false, path = "../ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } -sp-core = { version = "7.0.0", default-features = false, path = "../../../primitives/core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = 
"../../../primitives/offchain" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index cc9f3aa39e9bb..0244ae8f87b80 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -235,7 +235,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = (); type MaxFreezes = (); - type HoldIdentifier = (); + type RuntimeHoldReason = (); type MaxHolds = (); } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index c6b5277095772..6037302d4a65d 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -28,22 +28,22 @@ sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } 
-sp-application-crypto = { version = "7.0.0", path = "../../../primitives/application-crypto" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-consensus-sassafras = { version = "0.3.2-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -sp-core = { version = "7.0.0", path = "../../../primitives/core" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } -sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sp-keyring = { version = "7.0.0", path = "../../../primitives/keyring" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tokio = "1.22.0" diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 592d50b33be0e..64caf8081029b 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -14,23 +14,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] 
scale-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } log = { version = "0.4.17", default-features = false } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] array-bytes = "4.1" -sp-core = { version = "7.0.0", path = 
"../../primitives/core" } -sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "21.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 39a24041499f0..70a7e03898b70 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -235,7 +235,7 @@ pub mod pallet { StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; /// Genesis configuration for Sassafras protocol. - #[cfg_attr(feature = "std", derive(Default))] + #[derive(Default)] #[pallet::genesis_config] pub struct GenesisConfig { /// Genesis authorities. diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 73de4164041a3..1824940f0421d 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -15,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.50", optional = true } -scale-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } merlin = { version = "2.0", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.136", features = ["derive"], optional = true } +scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "7.0.0", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } sp-consensus-slots = { version = "0.10.0-dev", default-features = 
false, path = "../slots" } -sp-core = { version = "7.0.0", default-features = false, path = "../../core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.13.0", default-features = false, optional = true, path = "../../keystore" } -sp-runtime = { version = "7.0.0", default-features = false, path = "../../runtime" } -sp-std = { version = "5.0.0", default-features = false, path = "../../std" } - sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } +sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../../keystore" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } +sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } [features] default = ["std"] @@ -36,7 +36,7 @@ std = [ "merlin/std", "scale-codec/std", "scale-info/std", - "serde", + "serde/std", "sp-api/std", "sp-application-crypto/std", "sp-consensus-slots/std", @@ -47,3 +47,13 @@ std = [ "sp-std/std", "sp-timestamp", ] + +# Serde support without relying on std features. 
+serde = [ + "dep:serde", + "scale-info/serde", + "sp-application-crypto/serde", + "sp-consensus-slots/serde", + "sp-core/serde", + "sp-runtime/serde", +] diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 4ee21f5dd3f64..69cc184c48228 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -25,7 +25,7 @@ use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -#[cfg(feature = "std")] +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_core::crypto::KeyTypeId; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; @@ -123,7 +123,7 @@ pub struct Epoch { /// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. // TODO-SASS-P3: rename to something better... like LotteryConfig #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct SassafrasEpochConfiguration { /// Redundancy factor. 
pub redundancy_factor: u32, diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index 32a15ffc2f760..b218eb8d14a13 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -507,7 +507,7 @@ pub mod vrf { data: &VrfSignData, signature: &VrfSignature, ) -> bool { - let Ok(public) = PublicKey::deserialize_compressed(self.as_ref()) else { + let Ok(public) = PublicKey::deserialize_compressed(&self.0[..]) else { return false }; diff --git a/primitives/crypto/ec-utils/Cargo.toml b/primitives/crypto/ec-utils/Cargo.toml index e8546254031ea..d60b3442c8c12 100644 --- a/primitives/crypto/ec-utils/Cargo.toml +++ b/primitives/crypto/ec-utils/Cargo.toml @@ -11,7 +11,6 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-serialize = { version = "0.4.2", default-features = false } ark-ff = { version = "0.4.2", default-features = false } ark-ec = { version = "0.4.2", default-features = false } ark-std = { version = "0.4.0", default-features = false } @@ -34,6 +33,7 @@ sp-ark-bls12-381 = { version = "0.4.0-beta", default-features = false } sp-ark-bw6-761 = { version = "0.4.0-beta", default-features = false } sp-ark-ed-on-bls12-377 = { version = "0.4.0-beta", default-features = false } sp-ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0-beta", default-features = false } +ark-serialize = { version = "0.4.2", default-features = false } [features] default = [ "std" ] From dec7fd4ecff289ae8378d4a35aa247ac9d60d9d7 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 08:53:12 +0200 Subject: [PATCH 36/62] Sassafras key is SW format can't be used as AccountId32 --- primitives/core/src/bandersnatch.rs | 18 +++++++-- primitives/keyring/src/bandersnatch.rs | 56 ++++++-------------------- test-utils/runtime/src/genesismap.rs | 3 +- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/primitives/core/src/bandersnatch.rs 
b/primitives/core/src/bandersnatch.rs index b218eb8d14a13..66de4819230a3 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -42,8 +42,14 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bs38"); const SIGNING_CTX: &[u8] = b"SigningContext"; #[cfg(feature = "full_crypto")] const SEED_SERIALIZED_LEN: usize = 32; -const PUBLIC_SERIALIZED_LEN: usize = 32; -const SIGNATURE_SERIALIZED_LEN: usize = 64; + +// Edwards form sizes (TODO davxy: propably in the end we'll use this form) +// const PUBLIC_SERIALIZED_LEN: usize = 32; +// const SIGNATURE_SERIALIZED_LEN: usize = 64; + +// Short-Weierstrass form sizes +const PUBLIC_SERIALIZED_LEN: usize = 33; +const SIGNATURE_SERIALIZED_LEN: usize = 65; /// XXX. #[cfg_attr(feature = "full_crypto", derive(Hash))] @@ -116,7 +122,7 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) + write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) } #[cfg(not(feature = "std"))] @@ -282,7 +288,11 @@ pub mod vrf { ThinVrfSignature, Transcript, }; - const PREOUT_SERIALIZED_LEN: usize = 32; + // Edwards form sizes (TODO davxy: probably in the end we'll use this form) + // const PREOUT_SERIALIZED_LEN: usize = 32; + + // Short-Weierstrass form sizes + const PREOUT_SERIALIZED_LEN: usize = 33; /// Max number of VRF inputs/outputs pub const MAX_VRF_IOS: u32 = 3; diff --git a/primitives/keyring/src/bandersnatch.rs b/primitives/keyring/src/bandersnatch.rs index a61e9dafef877..8de6786a6fbf6 100644 --- a/primitives/keyring/src/bandersnatch.rs +++ b/primitives/keyring/src/bandersnatch.rs @@ -15,15 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Support code for the runtime. A set of test accounts. +//! 
A set of well-known keys used for testing. pub use sp_core::bandersnatch; use sp_core::{ bandersnatch::{Pair, Public, Signature}, crypto::UncheckedFrom, - ByteArray, Pair as PairT, H256, + ByteArray, Pair as PairT, }; -use sp_runtime::AccountId32; use lazy_static::lazy_static; use std::{collections::HashMap, ops::Deref, sync::Mutex}; @@ -41,39 +40,25 @@ pub enum Keyring { Two, } +const PUBLIC_RAW_LEN: usize = ::LEN; + impl Keyring { pub fn from_public(who: &Public) -> Option { Self::iter().find(|&k| &Public::from(k) == who) } - pub fn from_account_id(who: &AccountId32) -> Option { - Self::iter().find(|&k| &k.to_account_id() == who) - } - - pub fn from_raw_public(who: [u8; 32]) -> Option { + pub fn from_raw_public(who: [u8; PUBLIC_RAW_LEN]) -> Option { Self::from_public(&Public::unchecked_from(who)) } - pub fn to_raw_public(self) -> [u8; 32] { + pub fn to_raw_public(self) -> [u8; PUBLIC_RAW_LEN] { *Public::from(self).as_ref() } - pub fn from_h256_public(who: H256) -> Option { - Self::from_public(&Public::unchecked_from(who.into())) - } - - pub fn to_h256_public(self) -> H256 { - AsRef::<[u8; 32]>::as_ref(&Public::from(self)).into() - } - pub fn to_raw_public_vec(self) -> Vec { Public::from(self).to_raw_vec() } - pub fn to_account_id(self) -> AccountId32 { - self.to_raw_public().into() - } - pub fn sign(self, msg: &[u8]) -> Signature { Pair::from(self).sign(msg) } @@ -100,11 +85,6 @@ impl Keyring { pub fn numeric(idx: usize) -> Pair { Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed") } - - /// Get account id of a `numeric` account. - pub fn numeric_id(idx: usize) -> AccountId32 { - (*AsRef::<[u8; 32]>::as_ref(&Self::numeric(idx).public())).into() - } } impl From for &'static str { @@ -160,12 +140,6 @@ lazy_static! 
{ .collect(); } -impl From for AccountId32 { - fn from(k: Keyring) -> Self { - k.to_account_id() - } -} - impl From for Public { fn from(k: Keyring) -> Self { *(*PUBLIC_KEYS).get(&k).unwrap() @@ -178,26 +152,20 @@ impl From for Pair { } } -impl From for [u8; 32] { +impl From for [u8; PUBLIC_RAW_LEN] { fn from(k: Keyring) -> Self { *(*PUBLIC_KEYS).get(&k).unwrap().as_ref() } } -impl From for H256 { - fn from(k: Keyring) -> Self { - AsRef::<[u8; 32]>::as_ref(PUBLIC_KEYS.get(&k).unwrap()).into() - } -} - -impl From for &'static [u8; 32] { +impl From for &'static [u8; PUBLIC_RAW_LEN] { fn from(k: Keyring) -> Self { PUBLIC_KEYS.get(&k).unwrap().as_ref() } } -impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { +impl AsRef<[u8; PUBLIC_RAW_LEN]> for Keyring { + fn as_ref(&self) -> &[u8; PUBLIC_RAW_LEN] { PUBLIC_KEYS.get(self).unwrap().as_ref() } } @@ -209,8 +177,8 @@ impl AsRef for Keyring { } impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { + type Target = [u8; PUBLIC_RAW_LEN]; + fn deref(&self) -> &[u8; PUBLIC_RAW_LEN] { PUBLIC_KEYS.get(self).unwrap().as_ref() } } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 77fd971b06abd..21331d1f43854 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -116,10 +116,9 @@ impl GenesisStorageBuilder { .authorities .iter() .map(|id| { - use sp_keyring::bandersnatch::Keyring as BandersnatchKeyring; use std::str::FromStr; let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); - BandersnatchKeyring::from_str(&seed).unwrap().into() + sp_keyring::BandersnatchKeyring::from_str(&seed).unwrap().into() }) .collect(); From 5e360f951030ee3584e3c8666357deab711381e0 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 09:24:40 +0200 Subject: [PATCH 37/62] Set ring-vrf related refs to a working repo and branch --- Cargo.lock | 7 +++++++ Cargo.toml | 12 ++++++++---- 2 files 
changed, 15 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee1f286d7b6ad..52daa3dcd1826 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -481,6 +481,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" dependencies = [ "ark-ec", "ark-ff", @@ -528,6 +529,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" dependencies = [ "ark-ff", "ark-serialize", @@ -793,6 +795,7 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" version = "0.0.1" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" dependencies = [ "ark-bls12-381", "ark-ec", @@ -1441,6 +1444,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" +source = "git+https://github.com/davxy/ring-proof?branch=no-std#ff6bb1342bf2c7160a8cae3c87d6803e65342f1f" dependencies = [ "ark-ec", "ark-ff", @@ -2193,6 +2197,7 @@ checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" [[package]] name = "dleq_vrf" version = "0.0.2" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" dependencies = [ "ark-ec", "ark-ff", @@ -2565,6 +2570,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" +source = "git+https://github.com/davxy/fflonk?branch=no-std#084c03b46a88a873837ba86c07db30f40b1d70dd" dependencies = [ "ark-ec", "ark-ff", @@ -8796,6 +8802,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" +source = "git+https://github.com/davxy/ring-proof?branch=no-std#ff6bb1342bf2c7160a8cae3c87d6803e65342f1f" dependencies = [ "ark-ec", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index 240b540b39cf5..61db364f87930 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -344,11 +344,15 @@ lto = "fat" codegen-units = 1 [patch."https://github.com/w3f/ring-vrf"] -bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } +bandersnatch_vrfs = { git = "https://github.com/davxy/ring-vrf", branch = "refactory-and-tests" } +# bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } [patch."https://github.com/w3f/fflonk"] -fflonk = { path = "/mnt/ssd/develop/w3f/fflonk" } +fflonk = { git = "https://github.com/davxy/fflonk", branch = "no-std" } +# fflonk = { path = "/mnt/ssd/develop/w3f/fflonk" } [patch."https://github.com/w3f/ring-proof"] -common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } -ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } +common = { git = "https://github.com/davxy/ring-proof", branch = "no-std" } +ring = { git = "https://github.com/davxy/ring-proof", branch = "no-std" } +# common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } +# ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } From 23e150eb079a8031308dc744708cab4d33f1932b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 10:22:49 +0200 Subject: [PATCH 38/62] Clippy fix --- Cargo.lock | 6 +++--- Cargo.toml | 6 +++--- primitives/core/src/bandersnatch.rs | 6 ++---- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 52daa3dcd1826..23056e0232de4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1444,7 +1444,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/davxy/ring-proof?branch=no-std#ff6bb1342bf2c7160a8cae3c87d6803e65342f1f" +source = "git+https://github.com/davxy/ring-proof?branch=working-fork#5bdca95a9d0434c722a98b3310db6e46e8fbd981" dependencies = [ "ark-ec", "ark-ff", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/davxy/fflonk?branch=no-std#084c03b46a88a873837ba86c07db30f40b1d70dd" +source = 
"git+https://github.com/davxy/fflonk?branch=working-fork#a2664567b88d96e1dc2f82f8799b2ca60171c81d" dependencies = [ "ark-ec", "ark-ff", @@ -8802,7 +8802,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/davxy/ring-proof?branch=no-std#ff6bb1342bf2c7160a8cae3c87d6803e65342f1f" +source = "git+https://github.com/davxy/ring-proof?branch=working-fork#5bdca95a9d0434c722a98b3310db6e46e8fbd981" dependencies = [ "ark-ec", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index 61db364f87930..df2a8ca903b05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -348,11 +348,11 @@ bandersnatch_vrfs = { git = "https://github.com/davxy/ring-vrf", branch = "refac # bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } [patch."https://github.com/w3f/fflonk"] -fflonk = { git = "https://github.com/davxy/fflonk", branch = "no-std" } +fflonk = { git = "https://github.com/davxy/fflonk", branch = "working-fork" } # fflonk = { path = "/mnt/ssd/develop/w3f/fflonk" } [patch."https://github.com/w3f/ring-proof"] -common = { git = "https://github.com/davxy/ring-proof", branch = "no-std" } -ring = { git = "https://github.com/davxy/ring-proof", branch = "no-std" } +common = { git = "https://github.com/davxy/ring-proof", branch = "working-fork" } +ring = { git = "https://github.com/davxy/ring-proof", branch = "working-fork" } # common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } # ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index 66de4819230a3..2a5013b5be662 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -258,10 +258,8 @@ impl TraitPair for Pair { /// Returns true if the signature is good. 
fn verify>(signature: &Self::Signature, data: M, public: &Self::Public) -> bool { let data = vrf::VrfSignData::new(SIGNING_CTX, &[data.as_ref()], vrf::VrfIosVec::default()); - let signature = vrf::VrfSignature { - signature: signature.clone(), - vrf_outputs: vrf::VrfIosVec::default(), - }; + let signature = + vrf::VrfSignature { signature: *signature, vrf_outputs: vrf::VrfIosVec::default() }; public.vrf_verify(&data, &signature) } From 1ae6d371dcf32c3c3d74dd66c42dd141145a27ea Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 10:25:35 +0200 Subject: [PATCH 39/62] Removed try-runtime option from node-sassafras bin --- Cargo.lock | 2 -- bin/node-sassafras/node/Cargo.toml | 9 --------- bin/node-sassafras/node/src/cli.rs | 8 -------- bin/node-sassafras/node/src/command.rs | 17 ----------------- bin/node-sassafras/runtime/Cargo.toml | 11 ----------- bin/node-sassafras/runtime/src/lib.rs | 15 --------------- 6 files changed, 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23056e0232de4..10c663ca8d829 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5700,7 +5700,6 @@ dependencies = [ "sp-timestamp", "substrate-build-script-utils", "substrate-frame-rpc-system", - "try-runtime-cli", ] [[package]] @@ -5713,7 +5712,6 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "frame-try-runtime", "pallet-balances", "pallet-grandpa", "pallet-sassafras", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index db6c93652dc5c..10f6a3b468f45 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -59,9 +59,6 @@ frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/b # Local Dependencies node-sassafras-runtime = { version = "0.3.2-dev", path = "../runtime" } -# CLI-specific dependencies -try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } - [build-dependencies] 
substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } @@ -70,12 +67,6 @@ default = [] runtime-benchmarks = [ "node-sassafras-runtime/runtime-benchmarks" ] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. -try-runtime = [ - "node-sassafras-runtime/try-runtime", - "try-runtime-cli" -] use-session-pallet = [ "node-sassafras-runtime/use-session-pallet" ] diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs index 4ab4d34210c98..5bc6c9b102aaf 100644 --- a/bin/node-sassafras/node/src/cli.rs +++ b/bin/node-sassafras/node/src/cli.rs @@ -40,14 +40,6 @@ pub enum Subcommand { #[clap(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), - /// Try some command against runtime state. - #[cfg(feature = "try-runtime")] - TryRuntime(try_runtime_cli::TryRuntimeCmd), - - /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. - #[cfg(not(feature = "try-runtime"))] - TryRuntime, - /// Db meta columns information. ChainInfo(sc_cli::ChainInfoCmd), } diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index cc8f1a39ec634..4c37820b3c9bb 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -129,23 +129,6 @@ pub fn run() -> sc_cli::Result<()> { } }) }, - #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - // we don't need any of the components of new_partial, just a runtime, or a task - // manager to do `async_run`. 
- let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - let task_manager = - sc_service::TaskManager::new(config.tokio_handle.clone(), registry) - .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - Ok((cmd.run::(config), task_manager)) - }) - }, - #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ - You can enable it with `--features try-runtime`." - .into()), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(&config)) diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index e3298256a2388..69d7a2c62d876 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -22,7 +22,6 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../.. pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } -frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } @@ -89,14 +88,4 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = [ - "frame-executive/try-runtime", - "frame-try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "pallet-grandpa/try-runtime", - 
"pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", -] use-session-pallet = [] diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 0244ae8f87b80..73f29a8486937 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -559,19 +559,4 @@ impl_runtime_apis! { Ok(batches) } } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade() -> (Weight, Weight) { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. If any of the pre/post migration checks fail, we shall stop - // right here and right now. - let weight = Executive::try_runtime_upgrade().unwrap(); - (weight, BlockWeights::get().max_block) - } - - fn execute_block_no_check(block: Block) -> Weight { - Executive::execute_block_no_check(block) - } - } } From fdb746eba49853a2cd2df40d971a091963abb1c6 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 11:02:04 +0200 Subject: [PATCH 40/62] Fix one more clippy error --- frame/sassafras/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 70a7e03898b70..4250f3ab96fb7 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -802,7 +802,7 @@ impl Pallet { slot_idx -= duration; if tickets_meta.segments_count != 0 { Self::sort_tickets(tickets_meta.segments_count, epoch_tag, &mut tickets_meta); - TicketsMeta::::set(tickets_meta.clone()); + TicketsMeta::::set(tickets_meta); } } else if slot_idx >= 2 * duration { return None From 1074d37b70000c7a6f2f7d99dd9a63251eafa932 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 13 Jun 2023 11:18:08 +0200 Subject: [PATCH 41/62] Fix cargo doc --- client/consensus/babe/src/tests.rs | 1 - frame/sassafras/src/lib.rs | 18 +++++++++--------- 2 files changed, 9 
insertions(+), 10 deletions(-) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 7586fc04a85b8..0cd528e141f85 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -32,7 +32,6 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 4250f3ab96fb7..26a9004dcb81a 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -750,7 +750,7 @@ impl Pallet { Epoch { epoch_idx, start_slot, config } } - /// Fetch expected ticket for the given slot according to an "outside-in" sorting strategy. + /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. /// /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, /// with n >= k, then the tickets are assigned to the slots according to the following @@ -761,13 +761,13 @@ impl Pallet { /// /// With slot-index computed as `epoch_start() - slot`. /// - /// If `slot` value falls within the current epoch then we fetch tickets from the `Tickets` - /// list. + /// If `slot` value falls within the current epoch then we fetch tickets from the current epoch + /// tickets list. /// - /// If `slot` value falls within the next epoch then we fetch tickets from the `NextTickets` - /// list. Note that in this case we may have not finished receiving all the tickets for that - /// epoch yet. The next epoch tickets should be considered "stable" only after the current - /// epoch first half (see the [`submit_tickets_unsigned_extrinsic`]). + /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch + /// tickets ids list. 
Note that in this case we may have not finished receiving all the tickets + /// for that epoch yet. The next epoch tickets should be considered "stable" only after the + /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). /// /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), @@ -883,8 +883,8 @@ impl Pallet { /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to /// `submit_unsigned_transaction`. /// - /// The submitted tickets are added to the `NextTickets` list as long as the extrinsic has - /// is called within the first half of the epoch. That is, tickets received within the + /// The submitted tickets are added to the next epoch outstanding tickets as long as the + /// extrinsic is called within the first half of the epoch. Tickets received during the /// second half are dropped. /// /// TODO-SASS-P3: use pass a bounded vector??? 
From 0f148d72d637420114cfa44f24e113db8d8830c4 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 16 Jun 2023 10:37:38 +0200 Subject: [PATCH 42/62] Sassafras protocol v0.3.3 (#14362) - bump version to 0.3.3 - integration of ring-vrf with zk ring proof crypto primitive - tickets authoring and verification via ring zk proof --- Cargo.lock | 29 +- Cargo.toml | 4 + bin/node-sassafras/node/Cargo.toml | 8 +- bin/node-sassafras/node/src/chain_spec.rs | 6 +- bin/node-sassafras/runtime/Cargo.toml | 6 +- bin/node-sassafras/runtime/src/lib.rs | 6 +- client/consensus/sassafras/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 77 ++-- client/consensus/sassafras/src/lib.rs | 6 +- client/consensus/sassafras/src/tests.rs | 15 +- .../consensus/sassafras/src/verification.rs | 11 +- client/keystore/src/local.rs | 15 + frame/sassafras/Cargo.toml | 6 +- frame/sassafras/src/benchmarking.rs | 6 +- frame/sassafras/src/lib.rs | 84 +++-- frame/sassafras/src/mock.rs | 59 ++- frame/sassafras/src/session.rs | 6 +- frame/sassafras/src/tests.rs | 29 +- primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/consensus/sassafras/src/digests.rs | 4 +- primitives/consensus/sassafras/src/lib.rs | 24 +- primitives/consensus/sassafras/src/ticket.rs | 36 +- primitives/core/src/bandersnatch.rs | 342 +++++++++++++++++- primitives/io/src/lib.rs | 7 +- primitives/keystore/src/lib.rs | 9 + primitives/keystore/src/testing.rs | 22 +- test-utils/runtime/Cargo.toml | 4 +- test-utils/runtime/src/genesismap.rs | 2 +- test-utils/runtime/src/lib.rs | 6 +- 29 files changed, 661 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 10c663ca8d829..117efac5ad7ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -481,7 +481,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" +source = 
"git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#b391e66f73656b123bcd103c3e81de18e253236d" dependencies = [ "ark-ec", "ark-ff", @@ -529,7 +529,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#b391e66f73656b123bcd103c3e81de18e253236d" dependencies = [ "ark-ff", "ark-serialize", @@ -795,7 +795,7 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" version = "0.0.1" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#b391e66f73656b123bcd103c3e81de18e253236d" dependencies = [ "ark-bls12-381", "ark-ec", @@ -2197,7 +2197,7 @@ checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#6c474aaa6a9eb29a0c4e4975e1d87d664c8c5853" +source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#b391e66f73656b123bcd103c3e81de18e253236d" dependencies = [ "ark-ec", "ark-ff", @@ -5662,7 +5662,7 @@ dependencies = [ [[package]] name = "node-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" dependencies = [ "clap 4.3.2", "frame-benchmarking", @@ -5704,7 +5704,7 @@ dependencies = [ [[package]] name = "node-sassafras-runtime" -version = "0.3.2-dev" +version = "0.3.3-dev" dependencies = [ "frame-benchmarking", "frame-executive", @@ -7385,7 +7385,7 @@ dependencies = [ [[package]] name = "pallet-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" dependencies = [ "array-bytes 4.2.0", "frame-benchmarking", @@ -7396,7 +7396,6 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", - "sp-application-crypto", 
"sp-consensus-sassafras", "sp-core", "sp-io", @@ -7841,9 +7840,8 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" +version = "3.6.0" +source = "git+https://github.com/koute/parity-scale-codec?branch=master_fix_stack_overflow#b47acfd9bbf2659e5f3d05357d7cbc93afe6bb14" dependencies = [ "arrayvec 0.7.2", "bitvec", @@ -7856,9 +7854,8 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" +version = "3.6.0" +source = "git+https://github.com/koute/parity-scale-codec?branch=master_fix_stack_overflow#b47acfd9bbf2659e5f3d05357d7cbc93afe6bb14" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9637,7 +9634,7 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" dependencies = [ "async-trait", "fork-tree", @@ -11340,7 +11337,7 @@ dependencies = [ [[package]] name = "sp-consensus-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" dependencies = [ "async-trait", "merlin 2.0.1", diff --git a/Cargo.toml b/Cargo.toml index df2a8ca903b05..eef655bf2b50c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -356,3 +356,7 @@ common = { git = "https://github.com/davxy/ring-proof", branch = "working-fork" ring = { git = "https://github.com/davxy/ring-proof", branch = "working-fork" } # common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } # ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } + +[patch.crates-io] +#parity-scale-codec = { git = "https://github.com/koute/parity-scale-codec", branch = "master_fix_stack_overflow" } +codec = { package = "parity-scale-codec", git = "https://github.com/koute/parity-scale-codec", branch = "master_fix_stack_overflow" } diff --git 
a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index 10f6a3b468f45..f7226797957d0 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" authors = ["Parity Technologies "] description = "Node testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -27,8 +27,8 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } -sc-consensus-sassafras = { version = "0.3.2-dev", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.3.2-dev", path = "../../../primitives/consensus/sassafras" } +sc-consensus-sassafras = { version = "0.3.3-dev", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.3-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } @@ -57,7 +57,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarkin frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } # Local Dependencies -node-sassafras-runtime = { version = "0.3.2-dev", path = "../runtime" } +node-sassafras-runtime = { version = "0.3.3-dev", path = "../runtime" } [build-dependencies] substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } diff --git a/bin/node-sassafras/node/src/chain_spec.rs 
b/bin/node-sassafras/node/src/chain_spec.rs index 0636078bf60dd..6db56b22805d6 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -11,7 +11,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; // Genesis constants for Sassafras parameters configuration. -const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 16; +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 8; const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. @@ -127,9 +127,9 @@ fn testnet_genesis( }, sassafras: SassafrasConfig { #[cfg(feature = "use-session-pallet")] - authorities: vec![], + authorities: Vec::new(), #[cfg(not(feature = "use-session-pallet"))] - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 0)).collect(), + authorities: initial_authorities.iter().map(|x| x.1.clone()).collect(), epoch_config: SassafrasEpochConfiguration { attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index 69d7a2c62d876..a5f9e53ac9dfa 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras-runtime" -version = "0.3.2-dev" +version = "0.3.3-dev" authors = ["Parity Technologies "] description = "Runtime testbed for Sassafras consensus." 
homepage = "https://substrate.io/" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -pallet-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../../frame/sassafras" } +pallet-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } @@ -27,7 +27,7 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} -sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 73f29a8486937..7d5062105d0f8 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ 
b/bin/node-sassafras/runtime/src/lib.rs @@ -387,6 +387,10 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn ring_context() -> Option { + Sassafras::ring_context() + } + fn submit_tickets_unsigned_extrinsic( tickets: Vec ) -> bool { @@ -397,7 +401,7 @@ impl_runtime_apis! { Sassafras::slot_ticket_id(slot) } - fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketData)> { + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { Sassafras::slot_ticket(slot) } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 6037302d4a65d..5352129e31fd0 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" authors = ["Parity Technologies "] description = "Sassafras consensus algorithm for substrate" edition = "2021" @@ -32,7 +32,7 @@ sp-application-crypto = { version = "23.0.0", path = "../../../primitives/applic sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { version = "0.3.2-dev", path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.3-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs 
index e922f17b70d3f..c2a38e1e58e33 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -22,10 +22,9 @@ use super::*; use sp_consensus_sassafras::{ digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, - TicketClaim, TicketData, TicketEnvelope, TicketId, + TicketBody, TicketClaim, TicketEnvelope, TicketId, }; -use sp_core::{ed25519, twox_64, ByteArray}; - +use sp_core::{bandersnatch::ring_vrf::RingVrfContext, ed25519, twox_64, ByteArray}; use std::pin::Pin; /// Get secondary authority index for the given epoch and slot. @@ -42,7 +41,7 @@ pub(crate) fn secondary_authority_index( pub(crate) fn claim_slot( slot: Slot, epoch: &mut Epoch, - maybe_ticket: Option<(TicketId, TicketData)>, + maybe_ticket: Option<(TicketId, TicketBody)>, keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { let config = &epoch.config; @@ -79,7 +78,7 @@ pub(crate) fn claim_slot( }, }; - let authority_id = config.authorities.get(authority_idx as usize).map(|auth| &auth.0)?; + let authority_id = config.authorities.get(authority_idx as usize)?; let vrf_signature = keystore .bandersnatch_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_sign_data) @@ -92,9 +91,14 @@ pub(crate) fn claim_slot( } /// Generate the tickets for the given epoch. +/// /// Tickets additional information will be stored within the `Epoch` structure. -/// The additional information will be used later during session to claim slots. -fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec { +/// The additional information will be used later during the epoch to claim slots. 
+fn generate_epoch_tickets( + epoch: &mut Epoch, + keystore: &KeystorePtr, + ring_ctx: &RingVrfContext, +) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; let redundancy_factor = config.threshold_params.redundancy_factor; @@ -110,12 +114,18 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec = config.authorities.iter().map(|a| *a.as_ref()).collect(); + + for (authority_idx, authority_id) in config.authorities.iter().enumerate() { if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { continue } + debug!(target: LOG_TARGET, ">>> Generating new ring prover key..."); + let prover = ring_ctx.prover(&pks, authority_idx).unwrap(); + debug!(target: LOG_TARGET, ">>> ...done"); + let make_ticket = |attempt_idx| { let vrf_input = ticket_id_vrf_input(&config.randomness, attempt_idx, epoch.epoch_idx); @@ -131,25 +141,36 @@ fn generate_epoch_tickets(epoch: &mut Epoch, keystore: &KeystorePtr) -> Vec>> Creating ring proof for attempt {}", attempt_idx); + let mut sign_data = ticket_body_sign_data(&ticket_body); + sign_data.push_vrf_input(vrf_input).expect("Can't fail"); + + let ring_signature = keystore + .bandersnatch_ring_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + &sign_data, + &prover, + ) + .ok()??; + debug!(target: LOG_TARGET, ">>> ...done"); - // TODO DAVXY: placeholder - let ring_proof = (); - let ticket_envelope = TicketEnvelope { data, vrf_preout, ring_proof }; + let ticket_envelope = TicketEnvelope { body: ticket_body, ring_signature }; let ticket_secret = TicketSecret { attempt_idx, erased_secret: erased_seed }; - Some((ticket_envelope, ticket_id, ticket_secret)) + Some((ticket_id, ticket_envelope, ticket_secret)) }; for attempt in 0..max_attempts { - if let Some((envelope, ticket_id, ticket_secret)) = make_ticket(attempt) { + if let Some((ticket_id, ticket_envelope, ticket_secret)) = make_ticket(attempt) { log::debug!(target: LOG_TARGET, " → {ticket_id:016x}"); 
epoch .tickets_aux .insert(ticket_id, (authority_idx as AuthorityIndex, ticket_secret)); - tickets.push(envelope); + tickets.push(ticket_envelope); } } } @@ -415,11 +436,6 @@ async fn start_tickets_worker( }, }; - let tickets = generate_epoch_tickets(&mut epoch, &keystore); - if tickets.is_empty() { - continue - } - // Get the best block on which we will publish the tickets. let best_hash = match select_chain.best_chain().await { Ok(header) => header.hash(), @@ -429,6 +445,23 @@ async fn start_tickets_worker( }, }; + let ring_ctx = match client.runtime_api().ring_context(best_hash) { + Ok(Some(ctx)) => ctx, + Ok(None) => { + info!(target: LOG_TARGET, "Ring context not initialized yet"); + continue + }, + Err(err) => { + error!(target: LOG_TARGET, "Unable to read ring context: {}", err); + continue + }, + }; + + let tickets = generate_epoch_tickets(&mut epoch, &keystore, &ring_ctx); + if tickets.is_empty() { + continue + } + let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(best_hash, tickets) { Err(err) => Some(err.to_string()), Ok(false) => Some("Unknown reason".to_string()), diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4635c77e1fa8b..aafea3acbfcec 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -77,9 +77,9 @@ use sp_runtime::{ pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, - slot_claim_sign_data, slot_claim_vrf_input, ticket_id_vrf_input, AuthorityId, AuthorityIndex, - AuthorityPair, AuthoritySignature, SassafrasApi, SassafrasAuthorityWeight, - SassafrasConfiguration, SassafrasEpochConfiguration, TicketClaim, TicketData, TicketEnvelope, + slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id_vrf_input, + AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, SassafrasApi, + SassafrasConfiguration, 
SassafrasEpochConfiguration, TicketBody, TicketClaim, TicketEnvelope, TicketId, TicketSecret, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 65363388e6405..6756fef2f5660 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -185,9 +185,9 @@ fn create_test_config() -> SassafrasConfiguration { slot_duration: SLOT_DURATION, epoch_duration: EPOCH_DURATION, authorities: vec![ - (Keyring::Alice.public().into(), 1), - (Keyring::Bob.public().into(), 1), - (Keyring::Charlie.public().into(), 1), + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into(), ], randomness: [0; 32], threshold_params: SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, @@ -426,8 +426,7 @@ fn claim_primary_slots_works() { let alice_authority_idx = 0_u32; let ticket_id = 123; - let ticket_data = - TicketData { attempt_idx: 0, erased_public: [0; 32], revealed_public: [0; 32] }; + let ticket_body = TicketBody { attempt_idx: 0, erased_public: [0; 32] }; let ticket_secret = TicketSecret { attempt_idx: 0, erased_secret: [0; 32] }; // Fail if we have authority key in our keystore but not ticket aux data @@ -436,7 +435,7 @@ fn claim_primary_slots_works() { let claim = authorship::claim_slot( 0.into(), &mut epoch, - Some((ticket_id, ticket_data.clone())), + Some((ticket_id, ticket_body.clone())), &keystore, ); @@ -453,7 +452,7 @@ fn claim_primary_slots_works() { let (pre_digest, auth_id) = authorship::claim_slot( 0.into(), &mut epoch, - Some((ticket_id, ticket_data.clone())), + Some((ticket_id, ticket_body.clone())), &keystore, ) .unwrap(); @@ -468,7 +467,7 @@ fn claim_primary_slots_works() { epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret)); let claim = - authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_data)), &keystore); + 
authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_body)), &keystore); assert!(claim.is_none()); assert!(epoch.tickets_aux.is_empty()); } diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index f149f9758c032..4206ef195c5d0 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -38,7 +38,7 @@ struct VerificationParams<'a, B: 'a + BlockT> { /// Origin origin: BlockOrigin, /// Expected ticket for this block. - maybe_ticket: Option<(TicketId, TicketData)>, + maybe_ticket: Option<(TicketId, TicketBody)>, } /// Verified information @@ -76,9 +76,8 @@ fn check_header( return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let authority_id = match config.authorities.get(pre_digest.authority_idx as usize) { - Some(authority_id) => authority_id.0.clone(), - None => return Err(sassafras_err(Error::SlotAuthorNotFound)), + let Some(authority_id) = config.authorities.get(pre_digest.authority_idx as usize) else { + return Err(sassafras_err(Error::SlotAuthorNotFound)); }; // Check header signature (aka the Seal) @@ -88,7 +87,7 @@ fn check_header( .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; let pre_hash = header.hash(); - if !AuthorityPair::verify(&signature, &pre_hash, &authority_id) { + if !AuthorityPair::verify(&signature, &pre_hash, authority_id) { return Err(sassafras_err(Error::BadSignature(pre_hash))) } @@ -139,7 +138,7 @@ fn check_header( return Err(sassafras_err(Error::VrfVerificationFailed)) } - let info = VerifiedHeaderInfo { authority_id, seal }; + let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal }; Ok(CheckedHeader::Checked(header, info)) } diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 0fdbb24bf70b7..dae300f0b80b5 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -277,6 +277,21 @@ impl Keystore for LocalKeystore { 
self.vrf_output::(key_type, public, input) } + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> std::result::Result, TraitError> { + let sig = self + .0 + .read() + .key_pair_by_type::(public, key_type)? + .map(|pair| pair.ring_vrf_sign(data, prover)); + Ok(sig) + } + #[cfg(feature = "bls-experimental")] fn bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 64caf8081029b..08d10559caab1 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -21,8 +21,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys log = { version = "0.4.17", default-features = false } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } -sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } +sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } @@ -42,7 +41,6 @@ std = [ "pallet-session/std", 
"pallet-timestamp/std", "scale-info/std", - "sp-application-crypto/std", "sp-consensus-sassafras/std", "sp-io/std", "sp-runtime/std", diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs index fdf3f742bfb57..f4788b06b46fd 100644 --- a/frame/sassafras/src/benchmarking.rs +++ b/frame/sassafras/src/benchmarking.rs @@ -33,11 +33,7 @@ fn make_dummy_ticket(attempt_idx: u32) -> TicketEnvelope { 0xa7, 0x3d, ]; let output = VrfOutput::decode(&mut output_enc).unwrap(); - let data = TicketData { - attempt_idx, - erased_public: Default::default(), - revealed_public: Default::default(), - }; + let data = TicketData { attempt_idx, erased_public: Default::default() }; TicketEnvelope { data, vrf_preout: output, ring_proof: () } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 26a9004dcb81a..e04fb7a271238 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -54,9 +54,9 @@ use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, Epoch, EquivocationProof, Randomness, SassafrasAuthorityWeight, - SassafrasConfiguration, SassafrasEpochConfiguration, Slot, TicketData, TicketEnvelope, - TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + AuthorityId, Epoch, EquivocationProof, Randomness, RingVrfContext, SassafrasConfiguration, + SassafrasEpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, + SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -147,20 +147,14 @@ pub mod pallet { /// Current epoch authorities. 
#[pallet::storage] #[pallet::getter(fn authorities)] - pub type Authorities = StorageValue< - _, - WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, - ValueQuery, - >; + pub type Authorities = + StorageValue<_, WeakBoundedVec, ValueQuery>; /// Next epoch authorities. #[pallet::storage] #[pallet::getter(fn next_authorities)] - pub type NextAuthorities = StorageValue< - _, - WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, - ValueQuery, - >; + pub type NextAuthorities = + StorageValue<_, WeakBoundedVec, ValueQuery>; /// The slot at which the first epoch started. /// This is `None` until the first block is imported on chain. @@ -224,7 +218,7 @@ pub mod pallet { /// Tickets to be used for current and next epoch. #[pallet::storage] - pub type TicketsData = StorageMap<_, Identity, TicketId, TicketData, ValueQuery>; + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody, ValueQuery>; /// Next epoch tickets accumulator. /// Special `u32::MAX` key is reserved for a partially sorted segment. @@ -234,12 +228,18 @@ pub mod pallet { pub type NextTicketsSegments = StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; + /// Parameters used to verify tickets validity via ring-proof + /// In practice: Updatable Universal Reference String and the seed. + #[pallet::storage] + #[pallet::getter(fn ring_context)] + pub type RingContext = StorageValue<_, RingVrfContext>; + /// Genesis configuration for Sassafras protocol. #[derive(Default)] #[pallet::genesis_config] pub struct GenesisConfig { /// Genesis authorities. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + pub authorities: Vec, /// Genesis epoch configuration. 
pub epoch_config: SassafrasEpochConfiguration, } @@ -249,6 +249,11 @@ pub mod pallet { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); + + // TODO davxy : temporary code to generate a testing ring context + log::debug!(target: LOG_TARGET, "Building new testing ring context"); + let ring_ctx = RingVrfContext::new_testing(); + RingContext::::set(Some(ring_ctx)); } } @@ -355,6 +360,17 @@ pub mod pallet { log::debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + log::debug!(target: LOG_TARGET, "LOADING RING CTX"); + let Some(ring_ctx) = RingContext::::get() else { + return Err("Ring context not initialized".into()) + }; + log::debug!(target: LOG_TARGET, "... Loaded"); + + log::debug!(target: LOG_TARGET, "Building prover"); + let pks: Vec<_> = Self::authorities().iter().map(|auth| *auth.as_ref()).collect(); + let verifier = ring_ctx.verifier(pks.as_slice()).unwrap(); + log::debug!(target: LOG_TARGET, "... 
Built"); + // Check tickets score let next_auth = NextAuthorities::::get(); let epoch_config = EpochConfig::::get(); @@ -372,18 +388,35 @@ pub mod pallet { let epoch_idx = EpochIndex::::get() + 1; let mut segment = BoundedVec::with_max_capacity(); - for ticket in tickets.iter() { + for ticket in tickets { + log::debug!(target: LOG_TARGET, "Checking ring proof"); + let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( &randomness, - ticket.data.attempt_idx, + ticket.body.attempt_idx, epoch_idx, ); - let ticket_id = sp_consensus_sassafras::ticket_id(&vrf_input, &ticket.vrf_preout); - if ticket_id < ticket_threshold { - TicketsData::::set(ticket_id, ticket.data.clone()); + + let Some(vrf_preout) = ticket.ring_signature.outputs.get(0) else { + log::debug!(target: LOG_TARGET, "Missing ticket pre-output from ring signature"); + continue; + }; + let ticket_id = sp_consensus_sassafras::ticket_id(&vrf_input, &vrf_preout); + if ticket_id >= ticket_threshold { + log::debug!(target: LOG_TARGET, "Over threshold"); + continue + } + + let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&ticket.body); + sign_data.push_vrf_input(vrf_input).expect("Can't fail"); + + if ticket.ring_signature.verify(&sign_data, &verifier) { + TicketsData::::set(ticket_id, ticket.body.clone()); segment .try_push(ticket_id) .expect("has same length as bounded input vector; qed"); + } else { + log::debug!(target: LOG_TARGET, "Proof verification failure"); } } @@ -577,11 +610,8 @@ impl Pallet { /// If we detect one or more skipped epochs the policy is to use the authorities and values /// from the first skipped epoch. The tickets are invalidated. 
pub(crate) fn enact_epoch_change( - authorities: WeakBoundedVec<(AuthorityId, SassafrasAuthorityWeight), T::MaxAuthorities>, - next_authorities: WeakBoundedVec< - (AuthorityId, SassafrasAuthorityWeight), - T::MaxAuthorities, - >, + authorities: WeakBoundedVec, + next_authorities: WeakBoundedVec, ) { // PRECONDITION: caller has done initialization. // If using the internal trigger or the session pallet then this is guaranteed. @@ -687,7 +717,7 @@ impl Pallet { } // Initialize authorities on genesis phase. - fn initialize_genesis_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { + fn initialize_genesis_authorities(authorities: &[AuthorityId]) { // Genesis authorities may have been initialized via other means (e.g. via session pallet). // If this function has already been called with some authorities, then the new list // should be match the previously set one. @@ -820,7 +850,7 @@ impl Pallet { /// /// Refer to the `slot_ticket_id` documentation for the slot-ticket association /// criteria. 
- pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketData)> { + pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { Self::slot_ticket_id(slot).map(|id| (id, TicketsData::::get(id))) } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index fa3dfcc83593d..e3164f4132a3a 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -22,8 +22,8 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever}; use frame_support::traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityIndex, AuthorityPair, SassafrasEpochConfiguration, Slot, - TicketData, TicketEnvelope, VrfSignature, + digests::PreDigest, AuthorityIndex, AuthorityPair, RingProver, SassafrasEpochConfiguration, + Slot, TicketBody, TicketEnvelope, VrfSignature, }; use sp_core::{ crypto::{Pair, VrfSecret}, @@ -127,7 +127,7 @@ pub fn new_test_ext_with_pairs( .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) .collect::>(); - let authorities = pairs.iter().map(|p| (p.public(), 1)).collect(); + let authorities = pairs.iter().map(|p| p.public()).collect(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -142,7 +142,13 @@ pub fn new_test_ext_with_pairs( (pairs, storage.into()) } -fn make_ticket(slot: Slot, attempt: u32, pair: &AuthorityPair) -> TicketEnvelope { +fn make_ticket_with_prover( + slot: Slot, + attempt: u32, + pair: &AuthorityPair, + prover: &RingProver, +) -> TicketEnvelope { + println!("ATTEMPT: {}", attempt); let mut epoch = Sassafras::epoch_index(); let mut randomness = Sassafras::randomness(); @@ -153,22 +159,53 @@ fn make_ticket(slot: Slot, attempt: u32, pair: &AuthorityPair) -> TicketEnvelope randomness = crate::NextRandomness::::get(); } + let body = TicketBody { attempt_idx: attempt, erased_public: [0; 32] }; + + let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&body); + let 
vrf_input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt, epoch); - let vrf_preout = pair.as_ref().vrf_output(&vrf_input.into()); + sign_data.push_vrf_input(vrf_input).unwrap(); + + let ring_signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + + // Ticket-id can be generated via vrf-preout. + // We don't care that much about the value here. + + TicketEnvelope { body, ring_signature } +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + println!("Make prover"); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + println!("Done"); - // TODO DAVXY: use some well known valid test keys... - let data = - TicketData { attempt_idx: attempt, erased_public: [0; 32], revealed_public: [0; 32] }; - TicketEnvelope { data, vrf_preout, ring_proof: () } + prover } -/// Construct at most `attempts` tickets for the given `slot`. +/// Construct at most `attempts` tickets envelopes for the given `slot`. /// TODO-SASS-P3: filter out invalid tickets according to test threshold. /// E.g. 
by passing an optional threshold pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec { + let prover = make_prover(pair); (0..attempts) .into_iter() - .map(|attempt| make_ticket(slot, attempt, pair)) + .map(|attempt| make_ticket_with_prover(slot, attempt, pair, &prover)) .collect() } diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs index e15fd3637b9ae..6f16941c99e80 100644 --- a/frame/sassafras/src/session.rs +++ b/frame/sassafras/src/session.rs @@ -40,7 +40,7 @@ impl OneSessionHandler for Pallet { where I: Iterator, { - let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + let authorities: Vec<_> = validators.map(|(_, k)| k).collect(); Self::initialize_genesis_authorities(&authorities); } @@ -48,7 +48,7 @@ impl OneSessionHandler for Pallet { where I: Iterator, { - let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let authorities = validators.map(|(_account, k)| k).collect(); let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( authorities, Some( @@ -57,7 +57,7 @@ impl OneSessionHandler for Pallet { ), ); - let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_authorities = queued_validators.map(|(_account, k)| k).collect(); let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( next_authorities, Some( diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index c9e26f80fba04..4e0bec18de3c2 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -343,6 +343,8 @@ fn produce_epoch_change_digest_with_config() { }) } +// TODO davxy: create a read_tickets method which reads pre-constructed good tickets +// from a file. Creating this stuff "on-the-fly" is just too much expensive #[test] fn submit_segments_works() { let (pairs, mut ext) = new_test_ext_with_pairs(1); @@ -350,6 +352,8 @@ fn submit_segments_works() { // We're going to generate 14 segments. 
let segments_count = 3; + let ring_ctx = RingVrfContext::new_testing(); + ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; @@ -362,6 +366,8 @@ fn submit_segments_works() { config.redundancy_factor = 2; EpochConfig::::set(config); + RingContext::::set(Some(ring_ctx.clone())); + // Populate the segments via the `submit_tickets` let tickets = make_tickets(start_slot + 1, segments_count * max_tickets, pair); let segment_len = tickets.len() / segments_count as usize; @@ -392,11 +398,15 @@ fn segments_incremental_sortition_works() { let pair = &pairs[0]; let segments_count = 14; + let ring_ctx = RingVrfContext::new_testing(); + ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); + RingContext::::set(Some(ring_ctx.clone())); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // Manually populate the segments to fool the threshold check @@ -409,7 +419,7 @@ fn segments_incremental_sortition_works() { .enumerate() .map(|(j, ticket)| { let ticket_id = (i * segment_len + j) as TicketId; - TicketsData::::set(ticket_id, ticket.data.clone()); + TicketsData::::set(ticket_id, ticket.body.clone()); ticket_id }) .collect(); @@ -475,14 +485,20 @@ fn segments_incremental_sortition_works() { #[test] fn submit_enact_claim_tickets() { + use sp_core::crypto::VrfSecret; + let (pairs, mut ext) = new_test_ext_with_pairs(4); + let ring_ctx = RingVrfContext::new_testing(); + ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); let pair = &pairs[0]; + RingContext::::set(Some(ring_ctx.clone())); + initialize_block(start_block, start_slot, Default::default(), pair); // We don't want to trigger an epoch change in this test. 
@@ -524,15 +540,16 @@ fn submit_enact_claim_tickets() { // Compute and sort the tickets ids (aka tickets scores) let mut expected_ids: Vec<_> = tickets .iter() - .map(|t| { + .map(|ticket| { let epoch_idx = Sassafras::epoch_index() + 1; let randomness = Sassafras::next_randomness(); let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( &randomness, - t.data.attempt_idx, + ticket.body.attempt_idx, epoch_idx, ); - sp_consensus_sassafras::ticket_id(&vrf_input, &t.vrf_preout) + let vrf_output = pair.as_ref().vrf_output(&vrf_input); + sp_consensus_sassafras::ticket_id(&vrf_input, &vrf_output) }) .collect(); expected_ids.sort(); @@ -575,11 +592,15 @@ fn submit_enact_claim_tickets() { fn block_allowed_to_skip_epochs() { let (pairs, mut ext) = new_test_ext_with_pairs(4); + let ring_ctx = RingVrfContext::new_testing(); + ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; let epoch_duration: u64 = ::EpochDuration::get(); + RingContext::::set(Some(ring_ctx.clone())); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); let tickets = make_tickets(start_slot + 1, 3, &pairs[0]); diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 1824940f0421d..5029e796735c0 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "0.3.2-dev" +version = "0.3.3-dev" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2021" diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 0ffc5e998223c..966220c0f83df 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -19,7 +19,7 @@ use super::{ ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, - SassafrasAuthorityWeight, 
SassafrasEpochConfiguration, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, + SassafrasEpochConfiguration, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -47,7 +47,7 @@ pub struct PreDigest { #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { /// The authorities. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + pub authorities: Vec, /// The value of randomness to use for the slot-assignment. pub randomness: Randomness, /// Algorithm parameters. If not present, previous epoch parameters are used. diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 69cc184c48228..7c23c5de22c26 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -18,7 +18,7 @@ //! Primitives for Sassafras //! TODO-SASS-P2 : write proper docs -// TODO DAVXY enable warnings +// TODO davxy enable warnings // #![deny(warnings)] // #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] @@ -32,15 +32,19 @@ use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; -pub use sp_core::bandersnatch::vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}; +pub use sp_core::bandersnatch::{ + ring_vrf::{RingProver, RingVerifier, RingVrfContext}, + vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, +}; pub mod digests; pub mod inherents; pub mod ticket; pub use ticket::{ - slot_claim_sign_data, slot_claim_vrf_input, ticket_id, ticket_id_threshold, - ticket_id_vrf_input, TicketClaim, TicketData, TicketEnvelope, TicketId, TicketSecret, + slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id, + ticket_id_threshold, ticket_id_vrf_input, TicketBody, TicketClaim, TicketEnvelope, TicketId, + TicketSecret, }; mod app { @@ -72,11 +76,6 @@ pub type AuthoritySignature = 
app::Signature; /// the main Sassafras module. If that ever changes, then this must, too. pub type AuthorityId = app::Public; -/// The weight of an authority. -// NOTE: we use a unique name for the weight to avoid conflicts with other -// `Weight` types, since the metadata isn't able to disambiguate. -pub type SassafrasAuthorityWeight = u64; - /// Weight of a Sassafras block. /// Primary blocks have a weight of 1 whereas secondary blocks have a weight of 0. pub type SassafrasBlockWeight = u32; @@ -95,7 +94,7 @@ pub struct SassafrasConfiguration { /// The duration of epoch in slots. pub epoch_duration: u64, /// The authorities for the epoch. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + pub authorities: Vec, /// The randomness for the epoch. pub randomness: Randomness, /// Tickets threshold parameters. @@ -143,6 +142,9 @@ pub struct OpaqueKeyOwnershipProof(Vec); sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { + /// Get ring context to be used for ticket construction and verification. + fn ring_context() -> Option; + /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; @@ -151,7 +153,7 @@ sp_api::decl_runtime_apis! { fn slot_ticket_id(slot: Slot) -> Option; /// Get ticket id and data associated to the given slot. - fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketData)>; + fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)>; /// Current epoch information. 
fn current_epoch() -> Epoch; diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 0cedae3541809..33bb40f99b5ea 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -21,7 +21,10 @@ use super::{Randomness, SASSAFRAS_ENGINE_ID}; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_consensus_slots::Slot; -use sp_core::bandersnatch::vrf::{VrfInput, VrfOutput, VrfSignData}; +use sp_core::bandersnatch::{ + ring_vrf::RingVrfSignature, + vrf::{VrfInput, VrfOutput, VrfSignData}, +}; /// Ticket identifier. /// @@ -31,30 +34,23 @@ pub type TicketId = u128; /// Ticket data persisted on-chain. #[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketData { +pub struct TicketBody { /// Attempt index. pub attempt_idx: u32, /// Ed25519 public key which gets erased when claiming the ticket. pub erased_public: [u8; 32], - /// Ed25519 public key which gets exposed when claiming the ticket. - pub revealed_public: [u8; 32], } -/// Ticket ring proof. -/// TODO-SASS-P3: this is a placeholder. -pub type TicketRingProof = (); +/// Ticket ring vrf signature. +pub type TicketRingSignature = RingVrfSignature; /// Ticket envelope used on during submission. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketEnvelope { - /// VRF output. - pub data: TicketData, - /// VRF pre-output used to generate the ticket id. - pub vrf_preout: VrfOutput, - // /// Pedersen VRF signature - // pub ped_signature: (), - /// Ring VRF proof. - pub ring_proof: TicketRingProof, + /// Ticket body. + pub body: TicketBody, + /// Ring signature. + pub ring_signature: TicketRingSignature, } /// Ticket auxiliary information used to claim the ticket ownership. 
@@ -112,6 +108,16 @@ pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> ) } +/// Data to be signed via ring-vrf. +pub fn ticket_body_sign_data(ticket_body: &TicketBody) -> VrfSignData { + VrfSignData::from_iter( + &SASSAFRAS_ENGINE_ID, + &[b"ticket-body-transcript", ticket_body.encode().as_slice()], + [], + ) + .expect("can't fail; qed") +} + /// Get ticket-id for a given vrf input and output. /// /// Input generally obtained via `ticket_id_vrf_input`. diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index 2a5013b5be662..ea54e157b318e 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -17,8 +17,6 @@ //! TODO DOCS. -// #![allow(unused)] - #[cfg(feature = "std")] use crate::crypto::Ss58Codec; use crate::crypto::{ @@ -27,19 +25,21 @@ use crate::crypto::{ #[cfg(feature = "full_crypto")] use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; +use bandersnatch_vrfs::CanonicalSerialize; #[cfg(feature = "full_crypto")] use bandersnatch_vrfs::SecretKey; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::PassByInner; -use sp_std::vec::Vec; +use sp_std::{boxed::Box, vec::Vec}; /// Identifier used to match public keys against bandersnatch-vrf keys. 
pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bs38"); #[cfg(feature = "full_crypto")] const SIGNING_CTX: &[u8] = b"SigningContext"; + #[cfg(feature = "full_crypto")] const SEED_SERIALIZED_LEN: usize = 32; @@ -51,6 +51,23 @@ const SEED_SERIALIZED_LEN: usize = 32; const PUBLIC_SERIALIZED_LEN: usize = 33; const SIGNATURE_SERIALIZED_LEN: usize = 65; +// Edwards form sizes (TODO davxy: probably in the end we'll use this form) +// const PREOUT_SERIALIZED_LEN: usize = 32; + +// Short-Weierstrass form sizes +const PREOUT_SERIALIZED_LEN: usize = 33; + +// Size of serialized pedersen-vrf signature +// Short-Weierstrass form sizes +const PEDERSEN_SIGNATURE_SERIALIZED_LEN: usize = 163; + +// Size of serialized ring-proof +// Short-Weierstrass form sizes +const RING_PROOF_SERIALIZED_LEN: usize = 592; + +// Sise of serialized ring-vrf context params +const RING_VRF_CONTEXT_PARAMS_SERIALIZED_LEN: usize = 147744; + /// XXX. #[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( @@ -243,7 +260,9 @@ impl TraitPair for Pair { fn public(&self) -> Public { let public = self.0.to_public(); let mut raw = [0; PUBLIC_SERIALIZED_LEN]; - public.0.serialize(raw.as_mut_slice()).expect("key buffer length is good; qed"); + public + .serialize_compressed(raw.as_mut_slice()) + .expect("key buffer length is good; qed"); Public::unchecked_from(raw) } @@ -286,19 +305,14 @@ pub mod vrf { ThinVrfSignature, Transcript, }; - // Edwards form sizes (TODO davxy: probably in the end we'll use this form) - // const PREOUT_SERIALIZED_LEN: usize = 32; - - // Short-Weierstrass form sizes - const PREOUT_SERIALIZED_LEN: usize = 33; - /// Max number of VRF inputs/outputs pub const MAX_VRF_IOS: u32 = 3; - pub(super) type VrfIosVec = BoundedVec>; + /// Bounded vector used for VRF inputs and outputs. + pub type VrfIosVec = BoundedVec>; /// Input to be used for VRF sign and verify operations. 
- #[derive(Clone)] + #[derive(Clone, Debug)] pub struct VrfInput(pub(super) bandersnatch_vrfs::VrfInput); impl VrfInput { @@ -413,10 +427,10 @@ pub mod vrf { /// VRF signature. #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { - /// VRF signature - pub signature: Signature, /// VRF pre-outputs pub vrf_outputs: VrfIosVec, + /// VRF signature + pub signature: Signature, } #[cfg(feature = "full_crypto")] @@ -515,7 +529,7 @@ pub mod vrf { data: &VrfSignData, signature: &VrfSignature, ) -> bool { - let Ok(public) = PublicKey::deserialize_compressed(&self.0[..]) else { + let Ok(public) = PublicKey::deserialize_compressed(self.as_slice()) else { return false }; @@ -557,9 +571,223 @@ pub mod vrf { } } +/// Ring VRF related types and operations. +pub mod ring_vrf { + use super::{vrf::*, *}; + pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG}; + use bandersnatch_vrfs::{CanonicalDeserialize, PedersenVrfSignature, PublicKey}; + + /// TODO davxy + #[derive(Clone)] + pub struct RingVrfContext(pub KZG); + + impl RingVrfContext { + /// TODO davxy: This is a temporary function with temporary parameters. + /// + /// Initialization cerimony should be performed via some other means + /// For now we call this once here. 
+ pub fn new_testing() -> Self { + let kzg_seed = [0; 32]; + let domain_size = 2usize.pow(10); + let kzg = KZG::testing_kzg_setup(kzg_seed, domain_size); + Self(kzg) + } + + /// Get the keyset size + pub fn max_keyset_size(&self) -> usize { + self.0.max_keyset_size() + } + + /// TODO davxy + pub fn prover(&self, public_keys: &[Public], public_idx: usize) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + if !public_keys.iter().all(|public_key| { + match PublicKey::deserialize_compressed(public_key.as_slice()) { + Ok(pk) => { + let sw_affine = pk.0 .0.into(); + pks.push(sw_affine); + true + }, + _ => false, + } + }) { + return None + }; + let prover_key = self.0.prover_key(pks); + let ring_prover = self.0.init_ring_prover(prover_key, public_idx); + + Some(ring_prover) + } + + /// TODO davxy + pub fn verifier(&self, public_keys: &[Public]) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + if !public_keys.iter().all(|public_key| { + match PublicKey::deserialize_compressed(public_key.as_slice()) { + Ok(pk) => { + let sw_affine = pk.0 .0.into(); + pks.push(sw_affine); + true + }, + _ => false, + } + }) { + return None + }; + + let verifier_key = self.0.verifier_key(pks); + let ring_verifier = self.0.init_ring_verifier(verifier_key); + + Some(ring_verifier) + } + } + + // TODO davxy: why this isn't implemented automagically, is there some other required bound??? 
+ impl codec::EncodeLike for RingVrfContext {} + + impl Encode for RingVrfContext { + fn encode(&self) -> Vec { + let mut buf = Box::new([0; RING_VRF_CONTEXT_PARAMS_SERIALIZED_LEN]); + self.0 + .serialize_compressed(buf.as_mut_slice()) + .expect("preout serialization can't fail"); + buf.encode() + } + } + + impl Decode for RingVrfContext { + fn decode(i: &mut R) -> Result { + let buf = >::decode(i)?; + let kzg = + KZG::deserialize_compressed(buf.as_slice()).map_err(|_| "KZG decode error")?; + Ok(RingVrfContext(kzg)) + } + } + + impl MaxEncodedLen for RingVrfContext { + fn max_encoded_len() -> usize { + <[u8; RING_VRF_CONTEXT_PARAMS_SERIALIZED_LEN]>::max_encoded_len() + } + } + + impl TypeInfo for RingVrfContext { + type Identity = [u8; RING_VRF_CONTEXT_PARAMS_SERIALIZED_LEN]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// Ring VRF signature. + #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct RingVrfSignature { + /// VRF (pre)outputs. + pub outputs: VrfIosVec, + /// Pedersen VRF signature. + signature: [u8; PEDERSEN_SIGNATURE_SERIALIZED_LEN], + /// Ring proof. + ring_proof: [u8; RING_PROOF_SERIALIZED_LEN], + } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// TODO davxy + pub fn ring_vrf_sign(&self, data: &VrfSignData, prover: &RingProver) -> RingVrfSignature { + // Hack used because backend signature type is generic over the number of ios + // @burdges can we provide a vec or boxed version? 
+ match data.vrf_inputs.len() { + 0 => self.ring_vrf_sign_gen::<0>(data, prover), + 1 => self.ring_vrf_sign_gen::<1>(data, prover), + 2 => self.ring_vrf_sign_gen::<2>(data, prover), + 3 => self.ring_vrf_sign_gen::<3>(data, prover), + _ => panic!("Max VRF inputs is set to: {}", MAX_VRF_IOS), + } + } + + fn ring_vrf_sign_gen( + &self, + data: &VrfSignData, + prover: &RingProver, + ) -> RingVrfSignature { + let ios: Vec<_> = data + .vrf_inputs + .iter() + .map(|i| self.0.clone().0.vrf_inout(i.0.clone())) + .collect(); + + let ring_signature: bandersnatch_vrfs::RingVrfSignature = + self.0.sign_ring_vrf(data.transcript.clone(), ios.as_slice(), prover); + + let outputs: Vec<_> = ring_signature.preoutputs.into_iter().map(VrfOutput).collect(); + let outputs = VrfIosVec::truncate_from(outputs); + + let mut signature = [0; PEDERSEN_SIGNATURE_SERIALIZED_LEN]; + ring_signature + .signature + .serialize_compressed(signature.as_mut_slice()) + .expect("ped-signature serialization can't fail"); + + let mut ring_proof = [0; RING_PROOF_SERIALIZED_LEN]; + ring_signature + .ring_proof + .serialize_compressed(ring_proof.as_mut_slice()) + .expect("ring-proof serialization can't fail"); + + RingVrfSignature { outputs, signature, ring_proof } + } + } + + impl RingVrfSignature { + /// TODO davxy + pub fn verify(&self, data: &VrfSignData, verifier: &RingVerifier) -> bool { + let preouts_len = self.outputs.len(); + if preouts_len != data.vrf_inputs.len() { + return false + } + // Hack used because backend signature type is generic over the number of ios + // @burdges can we provide a vec or boxed version? 
+ match preouts_len { + 0 => self.verify_gen::<0>(data, verifier), + 1 => self.verify_gen::<1>(data, verifier), + 2 => self.verify_gen::<2>(data, verifier), + 3 => self.verify_gen::<3>(data, verifier), + _ => panic!("Max VRF input messages is set to: {}", MAX_VRF_IOS), + } + } + + fn verify_gen(&self, data: &VrfSignData, verifier: &RingVerifier) -> bool { + let Ok(preoutputs) = self + .outputs + .iter() + .map(|o| o.0.clone()) + .collect::>() + .into_inner() else { + return false + }; + + let Ok(signature) = PedersenVrfSignature::deserialize_compressed(self.signature.as_slice()) else { + return false + }; + + let Ok(ring_proof) = RingProof::deserialize_compressed(self.ring_proof.as_slice()) else { + return false + }; + + let ring_signature = + bandersnatch_vrfs::RingVrfSignature { signature, preoutputs, ring_proof }; + + let inputs = data.vrf_inputs.iter().map(|i| i.0.clone()); + + ring_signature + .verify_ring_vrf(data.transcript.clone(), inputs, verifier) + .is_ok() + } + } +} + #[cfg(test)] mod tests { - use super::{vrf::*, *}; + use super::{ring_vrf::*, vrf::*, *}; use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0; SEED_SERIALIZED_LEN]; @@ -581,6 +809,7 @@ mod tests { } #[test] + #[ignore] fn derive_hard_known_pair() { let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); // known address of DEV_PHRASE with 1.1 @@ -589,6 +818,7 @@ mod tests { } #[test] + #[ignore] fn verify_known_signature() { let pair = Pair::from_seed(DEV_SEED); let public = pair.public(); @@ -680,6 +910,10 @@ mod tests { let bytes = expected.encode(); + let expected_len = + data.vrf_inputs.len() * PREOUT_SERIALIZED_LEN + SIGNATURE_SERIALIZED_LEN + 1; + assert_eq!(bytes.len(), expected_len); + let decoded = VrfSignature::decode(&mut &bytes[..]).unwrap(); assert_eq!(expected, decoded); @@ -691,4 +925,80 @@ mod tests { let decoded = VrfSignature::decode(&mut &bytes[..]).unwrap(); assert_eq!(expected, decoded); } + + 
#[test] + fn ring_vrf_sign_verify() { + let ring_ctx = RingVrfContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + // Just pick one... + let prover_idx = 3; + pks[prover_idx] = public.clone(); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + let i3 = VrfInput::new(b"in3", &[(b"domy", b"yay"), (b"domz", b"nay")]); + + let data = VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1, i2, i3]).unwrap(); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let signature = pair.ring_vrf_sign(&data, &prover); + + let verifier = ring_ctx.verifier(&pks).unwrap(); + assert!(signature.verify(&data, &verifier)); + } + + #[test] + fn encode_decode_ring_vrf_signature() { + let ring_ctx = RingVrfContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + let public = pair.public(); + + // Just pick one... 
+ let prover_idx = 3; + pks[prover_idx] = public.clone(); + + let i1 = VrfInput::new(b"in1", &[(b"dom1", b"foo"), (b"dom2", b"bar")]); + let i2 = VrfInput::new(b"in2", &[(b"domx", b"hello")]); + let i3 = VrfInput::new(b"in3", &[(b"domy", b"yay"), (b"domz", b"nay")]); + + let data = VrfSignData::from_iter(b"mydata", &[b"tdata"], [i1, i2, i3]).unwrap(); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let expected = pair.ring_vrf_sign(&data, &prover); + + let bytes = expected.encode(); + + let expected_len = data.vrf_inputs.len() * PREOUT_SERIALIZED_LEN + + PEDERSEN_SIGNATURE_SERIALIZED_LEN + + RING_PROOF_SERIALIZED_LEN + + 1; + assert_eq!(bytes.len(), expected_len); + + let decoded = RingVrfSignature::decode(&mut &bytes[..]).unwrap(); + assert_eq!(expected, decoded); + } + + #[test] + fn encode_decode_ring_vrf_context() { + let ring_ctx = RingVrfContext::new_testing(); + + let encoded = ring_ctx.encode(); + println!("SIZE: {}", encoded.len()); + + assert_eq!(encoded.len(), RingVrfContext::max_encoded_len()); + + let _decoded = RingVrfContext::decode(&mut &encoded[..]).unwrap(); + + // TODO davxy... just use unsafe pointers comparison + } } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 050effbb1a003..124cebf643453 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1142,7 +1142,12 @@ pub trait Crypto { Ok(pubkey.serialize()) } - /// DAVXY + /// Generate a `bandersnatch` key pair for the given key type using an optional + /// `seed` and store it in the keystore. + /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. 
fn bandersnatch_generate( &mut self, id: KeyTypeId, diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index d67f2cb2d5c69..4c1c7d5855a95 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -209,6 +209,15 @@ pub trait Keystore: Send + Sync { input: &bandersnatch::vrf::VrfInput, ) -> Result, Error>; + /// DAVXY TODO + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + input: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> Result, Error>; + #[cfg(feature = "bls-experimental")] /// Returns all bls12-381 public keys for the given key type. fn bls381_public_keys(&self, id: KeyTypeId) -> Vec; diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index b9c685397fb6f..d34bb9658ce41 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -245,6 +245,19 @@ impl Keystore for MemoryKeystore { self.vrf_sign::(key_type, public, data) } + fn bandersnatch_ring_vrf_sign( + &self, + key_type: KeyTypeId, + public: &bandersnatch::Public, + data: &bandersnatch::vrf::VrfSignData, + prover: &bandersnatch::ring_vrf::RingProver, + ) -> Result, Error> { + let sig = self + .pair::(key_type, public) + .map(|pair| pair.ring_vrf_sign(data, prover)); + Ok(sig) + } + fn bandersnatch_vrf_output( &self, key_type: KeyTypeId, @@ -370,7 +383,7 @@ mod tests { } #[test] - fn vrf_sign() { + fn sr25519_vrf_sign() { let store = MemoryKeystore::new(); let secret_uri = "//Alice"; @@ -399,7 +412,7 @@ mod tests { } #[test] - fn vrf_output() { + fn sr25519_vrf_output() { let store = MemoryKeystore::new(); let secret_uri = "//Alice"; @@ -446,4 +459,9 @@ mod tests { let res = store.ecdsa_sign_prehashed(ECDSA, &pair.public(), &msg).unwrap(); assert!(res.is_some()); } + + #[test] + fn bandersnatch_vrf_sign() { + panic!("TODO") + } } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml 
index 2b863b695561d..92c160b3868cb 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -18,7 +18,7 @@ sp-application-crypto = { version = "23.0.0", default-features = false, path = " sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-beefy = { version = "4.0.0-dev", default-features = false, path = "../../primitives/consensus/beefy" } -sp-consensus-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } @@ -36,7 +36,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } -pallet-sassafras = { version = "0.3.2-dev", default-features = false, path = "../../frame/sassafras" } +pallet-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../frame/balances" } pallet-root-testing = { version = "1.0.0-dev", default-features = false, path = "../../frame/root-testing" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = 
"../../frame/sudo" } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 21331d1f43854..554e6142fa75a 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -135,7 +135,7 @@ impl GenesisStorageBuilder { epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), }, sassafras: pallet_sassafras::GenesisConfig { - authorities: authorities_bandersnatch.into_iter().map(|x| (x.into(), 1)).collect(), + authorities: authorities_bandersnatch.into_iter().map(|x| x.into()).collect(), epoch_config: sp_consensus_sassafras::SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32, diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index f588c9a601748..00421d26f2478 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -683,6 +683,10 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn ring_context() -> Option { + Sassafras::ring_context() + } + fn submit_tickets_unsigned_extrinsic( tickets: Vec ) -> bool { @@ -703,7 +707,7 @@ impl_runtime_apis! 
{ fn slot_ticket( slot: sp_consensus_sassafras::Slot - ) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketData)> { + ) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { Sassafras::slot_ticket(slot) } From 695fa7e7e7da3ec1ec4efc1461819a78e047729f Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sun, 18 Jun 2023 14:10:17 +0200 Subject: [PATCH 43/62] Dummy implementations for RuntimePublic --- primitives/application-crypto/src/bandersnatch.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/primitives/application-crypto/src/bandersnatch.rs b/primitives/application-crypto/src/bandersnatch.rs index 68adee8061c00..b53963c9fff3c 100644 --- a/primitives/application-crypto/src/bandersnatch.rs +++ b/primitives/application-crypto/src/bandersnatch.rs @@ -33,8 +33,7 @@ impl RuntimePublic for Public { type Signature = Signature; fn all(_key_type: KeyTypeId) -> Vec { - // sp_io::crypto::bandersnatch_public_keys(key_type) - unimplemented!() + Vec::new() } fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { @@ -42,13 +41,11 @@ impl RuntimePublic for Public { } fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { - // sp_io::crypto::bandersnatch_sign(key_type, self, msg.as_ref()) - unimplemented!() + None } fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { - // sp_io::crypto::bandersnatch_verify(signature, msg.as_ref(), self) - unimplemented!() + false } fn to_raw_vec(&self) -> Vec { From 1bc33af2d5456cd90b2c61feac2dece4ea590ae5 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 22 Jun 2023 13:11:12 +0200 Subject: [PATCH 44/62] Use upstream ring-vrf --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 548dcf5e8e412..3cf49e46b9dd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -481,7 +481,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" 
-source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#67265d19ee2b0e6f49d76a6f7a00aa3addb3e561" +source = "git+https://github.com/w3f/ring-vrf#07d1df31614b66e172c6b1cb2622e9419f502676" dependencies = [ "ark-ec", "ark-ff", @@ -529,7 +529,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#67265d19ee2b0e6f49d76a6f7a00aa3addb3e561" +source = "git+https://github.com/w3f/ring-vrf#07d1df31614b66e172c6b1cb2622e9419f502676" dependencies = [ "ark-ff", "ark-serialize", @@ -795,7 +795,7 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" version = "0.0.1" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#67265d19ee2b0e6f49d76a6f7a00aa3addb3e561" +source = "git+https://github.com/w3f/ring-vrf#07d1df31614b66e172c6b1cb2622e9419f502676" dependencies = [ "ark-bls12-381", "ark-ec", @@ -2197,7 +2197,7 @@ checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/davxy/ring-vrf?branch=refactory-and-tests#67265d19ee2b0e6f49d76a6f7a00aa3addb3e561" +source = "git+https://github.com/w3f/ring-vrf#07d1df31614b66e172c6b1cb2622e9419f502676" dependencies = [ "ark-ec", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index df2a8ca903b05..c15f0aaf431b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -343,8 +343,8 @@ lto = "fat" # https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units codegen-units = 1 -[patch."https://github.com/w3f/ring-vrf"] -bandersnatch_vrfs = { git = "https://github.com/davxy/ring-vrf", branch = "refactory-and-tests" } +# [patch."https://github.com/w3f/ring-vrf"] +# bandersnatch_vrfs = { git = "https://github.com/davxy/ring-vrf", branch = "refactory-and-tests" } # bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } [patch."https://github.com/w3f/fflonk"] From 
68222f7e16a1898430fad030290302e6a639ff19 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 9 Aug 2023 19:34:04 +0200 Subject: [PATCH 45/62] Fixed sassafras primitives after master merge --- Cargo.lock | 229 ++++++++++++++++--- Cargo.toml | 14 -- primitives/consensus/sassafras/Cargo.toml | 6 +- primitives/consensus/sassafras/src/lib.rs | 4 +- primitives/consensus/sassafras/src/ticket.rs | 43 ++-- primitives/io/src/lib.rs | 1 - 6 files changed, 222 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0289ea601eab0..de3e0d7d39fc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -506,6 +506,12 @@ dependencies = [ "sha3", ] +[[package]] +name = "array-bytes" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" + [[package]] name = "array-bytes" version = "6.1.0" @@ -753,7 +759,7 @@ dependencies = [ name = "binary-merkle-tree" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "env_logger 0.9.3", "hash-db", "log", @@ -2395,7 +2401,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame-benchmarking" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "frame-support", "frame-support-procedural", "frame-system", @@ -2423,7 +2429,7 @@ name = "frame-benchmarking-cli" version = "4.0.0-dev" dependencies = [ "Inflector", - "array-bytes", + "array-bytes 6.1.0", "chrono", "clap 4.3.2", "comfy-table", @@ -2534,7 +2540,7 @@ dependencies = [ name = "frame-executive" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "frame-support", "frame-system", "frame-try-runtime", @@ -2591,7 +2597,7 @@ name = "frame-support" version = "4.0.0-dev" dependencies = [ "aquamarine", - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "bitflags", "environmental", @@ -5101,7 +5107,7 @@ dependencies = [ name = "node-bench" version = "0.9.0-dev" 
dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "clap 4.3.2", "derive_more", "fs_extra", @@ -5137,7 +5143,7 @@ dependencies = [ name = "node-cli" version = "3.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_cmd", "clap 4.3.2", "clap_complete", @@ -5324,6 +5330,82 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.3-dev" +dependencies = [ + "clap 4.3.2", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.3-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -5665,7 +5747,7 @@ dependencies = [ name = 
"pallet-alliance" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "frame-benchmarking", "frame-support", "frame-system", @@ -5962,7 +6044,7 @@ dependencies = [ name = "pallet-beefy-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "binary-merkle-tree", "frame-support", "frame-system", @@ -6039,7 +6121,7 @@ dependencies = [ name = "pallet-contracts" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "bitflags", "env_logger 0.9.3", @@ -6544,7 +6626,7 @@ dependencies = [ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "env_logger 0.9.3", "frame-benchmarking", "frame-support", @@ -6991,6 +7073,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.3-dev" +dependencies = [ + "array-bytes 4.2.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -7302,7 +7404,7 @@ dependencies = [ name = "pallet-transaction-storage" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "frame-benchmarking", "frame-support", "frame-system", @@ -8683,7 +8785,7 @@ dependencies = [ name = "sc-cli" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "chrono", "clap 4.3.2", "fdlimit", @@ -8752,7 +8854,7 @@ dependencies = [ name = "sc-client-db" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "criterion", "hash-db", "kitchensink-runtime", @@ -8920,7 +9022,7 @@ dependencies = [ name = "sc-consensus-beefy" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-channel", "async-trait", "fnv", @@ -8996,7 +9098,7 @@ name = "sc-consensus-grandpa" 
version = "0.10.0-dev" dependencies = [ "ahash 0.8.3", - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "async-trait", "dyn-clone", @@ -9124,6 +9226,43 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.3-dev" +dependencies = [ + "async-trait", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -9151,7 +9290,7 @@ dependencies = [ name = "sc-executor" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "criterion", "env_logger 0.9.3", @@ -9238,7 +9377,7 @@ dependencies = [ name = "sc-keystore" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "parking_lot 0.12.1", "serde_json", "sp-application-crypto", @@ -9252,7 +9391,7 @@ dependencies = [ name = "sc-network" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "async-channel", "async-trait", @@ -9370,7 +9509,7 @@ dependencies = [ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-channel", "futures", "libp2p-identity", @@ -9390,7 +9529,7 @@ dependencies = [ name = "sc-network-statement" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-channel", "futures", "libp2p", @@ -9407,7 +9546,7 @@ dependencies = [ name = "sc-network-sync" version = 
"0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-channel", "async-trait", "fork-tree", @@ -9476,7 +9615,7 @@ dependencies = [ name = "sc-network-transactions" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "futures", "libp2p", "log", @@ -9493,7 +9632,7 @@ dependencies = [ name = "sc-offchain" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "bytes", "fnv", "futures", @@ -9613,7 +9752,7 @@ dependencies = [ name = "sc-rpc-spec-v2" version = "0.10.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "futures", "futures-util", @@ -9726,7 +9865,7 @@ dependencies = [ name = "sc-service-test" version = "2.0.0" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-channel", "fdlimit", "futures", @@ -9899,7 +10038,7 @@ dependencies = [ name = "sc-transaction-pool" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "async-trait", "criterion", @@ -10692,7 +10831,7 @@ dependencies = [ name = "sp-consensus-beefy" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "lazy_static", "parity-scale-codec", "scale-info", @@ -10736,6 +10875,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.3.3-dev" +dependencies = [ + "async-trait", + "merlin 2.0.1", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -10751,7 +10910,7 @@ dependencies = [ name = "sp-core" version = "21.0.0" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "arrayvec 0.7.4", "bandersnatch_vrfs", "bitflags", @@ -10965,7 +11124,7 @@ dependencies = [ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ - "array-bytes", + "array-bytes 
6.1.0", "ckb-merkle-mountain-range", "log", "parity-scale-codec", @@ -11163,7 +11322,7 @@ dependencies = [ name = "sp-state-machine" version = "0.28.0" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "assert_matches", "hash-db", "log", @@ -11285,7 +11444,7 @@ name = "sp-trie" version = "22.0.0" dependencies = [ "ahash 0.8.3", - "array-bytes", + "array-bytes 6.1.0", "criterion", "hash-db", "hashbrown 0.13.2", @@ -11649,7 +11808,7 @@ dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "async-trait", "futures", "parity-scale-codec", @@ -11674,7 +11833,7 @@ dependencies = [ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ - "array-bytes", + "array-bytes 6.1.0", "frame-executive", "frame-support", "frame-system", @@ -11684,6 +11843,7 @@ dependencies = [ "log", "pallet-babe", "pallet-balances", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -11700,6 +11860,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-genesis-builder", diff --git a/Cargo.toml b/Cargo.toml index 61c35bd63bbd0..da15cf4380ca1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -390,17 +390,3 @@ inherits = "release" lto = "fat" # https://doc.rust-lang.org/rustc/codegen-options/index.html#codegen-units codegen-units = 1 - -# [patch."https://github.com/w3f/ring-vrf"] -# bandersnatch_vrfs = { git = "https://github.com/davxy/ring-vrf", branch = "refactory-and-tests" } -# bandersnatch_vrfs = { path = "/mnt/ssd/develop/w3f/ring-vrf/bandersnatch_vrfs" } - -[patch."https://github.com/w3f/fflonk"] -fflonk = { git = "https://github.com/davxy/fflonk", branch = "working-fork" } -# fflonk = { path = "/mnt/ssd/develop/w3f/fflonk" } - -[patch."https://github.com/w3f/ring-proof"] -common = { git = "https://github.com/davxy/ring-proof", branch = "working-fork" } -ring = { git = 
"https://github.com/davxy/ring-proof", branch = "working-fork" } -# common = { path = "/mnt/ssd/develop/w3f/ring-proof/common" } -# ring = { path = "/mnt/ssd/develop/w3f/ring-proof/ring" } diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 5029e796735c0..bf0ff0a7f904d 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -20,11 +20,11 @@ scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-featu scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } -sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } -sp-core = { version = "21.0.0", default-features = false, path = "../../core" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../../keystore" } +sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../../keystore", features = ["bandersnatch-experimental"] } sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } sp-std = { version = "8.0.0", default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } diff --git 
a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 7c23c5de22c26..45e585c72e93c 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -33,7 +33,7 @@ use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_core::bandersnatch::{ - ring_vrf::{RingProver, RingVerifier, RingVrfContext}, + ring_vrf::{RingContext, RingProver, RingVerifier}, vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, }; @@ -143,7 +143,7 @@ sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. pub trait SassafrasApi { /// Get ring context to be used for ticket construction and verification. - fn ring_context() -> Option; + fn ring_context() -> Option; /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 33bb40f99b5ea..c8f95b165c6d9 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -68,18 +68,26 @@ pub struct TicketClaim { pub erased_signature: [u8; 64], } +fn vrf_input_from_data( + domain: &[u8], + data: impl IntoIterator>, +) -> VrfInput { + let raw = data.into_iter().fold(Vec::new(), |mut v, e| { + let bytes = e.as_ref(); + v.extend_from_slice(bytes); + v.extend_from_slice(&bytes.len().to_le_bytes()); + v + }); + VrfInput::new(domain, raw) +} + /// VRF input to claim slot ownership during block production. /// /// Input randomness is current epoch randomness. 
pub fn slot_claim_vrf_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { - VrfInput::new( - &SASSAFRAS_ENGINE_ID, - &[ - (b"type", b"slot-claim"), - (b"randomness", randomness), - (b"slot", &slot.to_le_bytes()), - (b"epoch", &epoch.to_le_bytes()), - ], + vrf_input_from_data( + b"sassafras-claim-v1.0", + [randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()], ) } @@ -88,34 +96,27 @@ pub fn slot_claim_vrf_input(randomness: &Randomness, slot: Slot, epoch: u64) -> /// Input randomness is current epoch randomness. pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { let vrf_input = slot_claim_vrf_input(randomness, slot, epoch); - - VrfSignData::from_iter(&SASSAFRAS_ENGINE_ID, &[b"slot-claim-transcript"], [vrf_input]) - .expect("can't fail; qed") + VrfSignData::new_unchecked(&SASSAFRAS_ENGINE_ID, Some("slot-claim-transcript"), Some(vrf_input)) } /// VRF input to generate the ticket id. /// /// Input randomness is current epoch randomness. pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { - VrfInput::new( - &SASSAFRAS_ENGINE_ID, - &[ - (b"type", b"ticket-id"), - (b"randomness", randomness), - (b"attempt", &attempt.to_le_bytes()), - (b"epoch", &epoch.to_le_bytes()), - ], + vrf_input_from_data( + b"sassafras-ticket-v1.0", + [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], ) } /// Data to be signed via ring-vrf. +/// TODO davxy: ticket_body is not a vrf input??? pub fn ticket_body_sign_data(ticket_body: &TicketBody) -> VrfSignData { - VrfSignData::from_iter( + VrfSignData::new_unchecked( &SASSAFRAS_ENGINE_ID, &[b"ticket-body-transcript", ticket_body.encode().as_slice()], [], ) - .expect("can't fail; qed") } /// Get ticket-id for a given vrf input and output. 
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 661a50ee15e65..0bc434a2a59fc 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -95,7 +95,6 @@ use sp_keystore::KeystoreExt; #[cfg(feature = "bandersnatch-experimental")] use sp_core::bandersnatch; use sp_core::{ - bandersnatch, crypto::KeyTypeId, ecdsa, ed25519, offchain::{ From 060e4012159b0cf86f782d10b4fb248a1cf5c405 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 11 Aug 2023 14:03:57 +0200 Subject: [PATCH 46/62] Improve sassafras pallet logic and cleanup stale data --- Cargo.lock | 1 + frame/sassafras/Cargo.toml | 5 +- frame/sassafras/src/lib.rs | 176 +++++++---- frame/sassafras/src/mock.rs | 148 +++++++--- frame/sassafras/src/session.rs | 16 +- frame/sassafras/src/tests.rs | 296 ++++++++++--------- primitives/consensus/sassafras/src/lib.rs | 2 +- primitives/consensus/sassafras/src/ticket.rs | 8 +- primitives/core/src/bandersnatch.rs | 51 +++- 9 files changed, 436 insertions(+), 267 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de3e0d7d39fc7..701de62cd1c56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7078,6 +7078,7 @@ name = "pallet-sassafras" version = "0.3.3-dev" dependencies = [ "array-bytes 4.2.0", + "env_logger 0.10.0", "frame-benchmarking", "frame-support", "frame-system", diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 08d10559caab1..08cba5a8dd1ac 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } +scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } 
frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } @@ -29,6 +29,7 @@ sp-std = { version = "8.0.0", default-features = false, path = "../../primitives [dev-dependencies] array-bytes = "4.1" sp-core = { version = "21.0.0", path = "../../primitives/core" } +env_logger = "0.10" [features] default = ["std"] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index e04fb7a271238..be3c14a4ebb9a 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -51,10 +51,13 @@ use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; -use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +use frame_system::{ + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::{BlockNumberFor, HeaderFor}, +}; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, Epoch, EquivocationProof, Randomness, RingVrfContext, SassafrasConfiguration, + AuthorityId, Epoch, EquivocationProof, Randomness, RingContext, SassafrasConfiguration, SassafrasEpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; @@ -218,7 +221,7 @@ pub mod pallet { /// Tickets to be used for current and next epoch. #[pallet::storage] - pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody, ValueQuery>; + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; /// Next epoch tickets accumulator. /// Special `u32::MAX` key is reserved for a partially sorted segment. @@ -232,28 +235,26 @@ pub mod pallet { /// In practice: Updatable Universal Reference String and the seed. 
#[pallet::storage] #[pallet::getter(fn ring_context)] - pub type RingContext = StorageValue<_, RingVrfContext>; + pub type RingVrfContext = StorageValue<_, RingContext>; /// Genesis configuration for Sassafras protocol. - #[derive(Default)] #[pallet::genesis_config] - pub struct GenesisConfig { + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { /// Genesis authorities. pub authorities: Vec, /// Genesis epoch configuration. pub epoch_config: SassafrasEpochConfiguration, + /// Phantom config + #[serde(skip)] + pub _phantom: sp_std::marker::PhantomData, } #[pallet::genesis_build] - impl GenesisBuild for GenesisConfig { + impl BuildGenesisConfig for GenesisConfig { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); - - // TODO davxy : temporary code to generate a testing ring context - log::debug!(target: LOG_TARGET, "Building new testing ring context"); - let ring_ctx = RingVrfContext::new_testing(); - RingContext::::set(Some(ring_ctx)); } } @@ -318,7 +319,7 @@ pub mod pallet { let randomness = pre_digest .vrf_signature - .vrf_outputs + .outputs .get(0) .expect("vrf preout should have been already checked by the client; qed") .make_bytes::(RANDOMNESS_VRF_CONTEXT, &vrf_input); @@ -361,13 +362,14 @@ pub mod pallet { log::debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); log::debug!(target: LOG_TARGET, "LOADING RING CTX"); - let Some(ring_ctx) = RingContext::::get() else { + let Some(ring_ctx) = RingVrfContext::::get() else { return Err("Ring context not initialized".into()) }; log::debug!(target: LOG_TARGET, "... Loaded"); - log::debug!(target: LOG_TARGET, "Building prover"); + // TODO @davxy this should be done once per epoch and with the NEXT EPOCH AUTHORITIES!!! let pks: Vec<_> = Self::authorities().iter().map(|auth| *auth.as_ref()).collect(); + log::debug!(target: LOG_TARGET, "Building verifier. 
Ring size {}", pks.len()); let verifier = ring_ctx.verifier(pks.as_slice()).unwrap(); log::debug!(target: LOG_TARGET, "... Built"); @@ -399,7 +401,7 @@ pub mod pallet { let Some(vrf_preout) = ticket.ring_signature.outputs.get(0) else { log::debug!(target: LOG_TARGET, "Missing ticket pre-output from ring signature"); - continue; + continue }; let ticket_id = sp_consensus_sassafras::ticket_id(&vrf_input, &vrf_preout); if ticket_id >= ticket_threshold { @@ -411,7 +413,7 @@ pub mod pallet { sign_data.push_vrf_input(vrf_input).expect("Can't fail"); if ticket.ring_signature.verify(&sign_data, &verifier) { - TicketsData::::set(ticket_id, ticket.body.clone()); + TicketsData::::set(ticket_id, Some(ticket.body)); segment .try_push(ticket_id) .expect("has same length as bounded input vector; qed"); @@ -471,7 +473,7 @@ pub mod pallet { #[pallet::weight({0})] pub fn report_equivocation_unsigned( origin: OriginFor, - _equivocation_proof: EquivocationProof, + _equivocation_proof: EquivocationProof>, //key_owner_proof: T::KeyOwnerProof, ) -> DispatchResult { ensure_none(origin)?; @@ -577,7 +579,7 @@ pub mod pallet { impl Pallet { /// Determine whether an epoch change should take place at this block. /// Assumes that initialization has already taken place. - pub fn should_end_epoch(now: T::BlockNumber) -> bool { + pub fn should_end_epoch(now: BlockNumberFor) -> bool { // The epoch has technically ended during the passage of time between this block and the // last, but we have to "end" the epoch now, since there is no earlier possible block we // could have done it. @@ -601,11 +603,47 @@ impl Pallet { slot.checked_sub(Self::current_epoch_start().into()).unwrap_or(u64::MAX) } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_end_epoch` - /// has returned `true`, and the caller is the only caller of this function. + /// Remove all tickets related data. 
/// - /// Typically, this is not handled directly, but by a higher-level validator-set - /// manager like `pallet-session`. + /// May not be efficient as the calling places may repeat some of this operations + /// but is a very extraordinary operation (hopefully never happens in production) + /// and better safe than sorry. + fn reset_tickets_data() { + let tickets_metadata = TicketsMeta::::get(); + + // Remove even-epoch data. + let tickets_count = tickets_metadata.tickets_count[0]; + (0..tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((0, idx)) { + TicketsData::::remove(id); + } + }); + + // Remove odd-epoch data. + let tickets_count = tickets_metadata.tickets_count[1]; + (0..tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((1, idx)) { + TicketsData::::remove(id); + } + }); + + // Remove all outstanding tickets segments. + (0..tickets_metadata.segments_count).into_iter().for_each(|i| { + NextTicketsSegments::::remove(i); + }); + NextTicketsSegments::::remove(u32::MAX); + + // Reset tickets metadata + TicketsMeta::::set(Default::default()); + } + + /// Enact an epoch change. + /// + /// Should be done on every block where `should_end_epoch` has returned `true`, and the caller + /// is the only caller of this function. + /// + /// Typically, this is not handled directly, but by a higher-level component implementing the + /// `EpochChangeTrigger` or `OneSessionHandler` trait. /// /// If we detect one or more skipped epochs the policy is to use the authorities and values /// from the first skipped epoch. The tickets are invalidated. @@ -628,10 +666,15 @@ impl Pallet { let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); if slot_idx >= T::EpochDuration::get() { - // Detected one or more skipped epochs, kill tickets and recompute epoch index. 
- TicketsMeta::::kill(); - epoch_idx += u64::from(slot_idx) / T::EpochDuration::get(); + // Detected one or more skipped epochs, clear tickets data and recompute epoch index. + Self::reset_tickets_data(); + let skipped_epochs = u64::from(slot_idx) / T::EpochDuration::get(); + epoch_idx += skipped_epochs; + log::warn!(target: LOG_TARGET, "Detected {} skipped epochs, resuming from epoch {}", skipped_epochs, epoch_idx); } + + let mut tickets_metadata = TicketsMeta::::get(); + EpochIndex::::put(epoch_idx); let next_epoch_index = epoch_idx @@ -660,15 +703,24 @@ impl Pallet { Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); let epoch_tag = (epoch_idx & 1) as u8; - let mut tickets_metadata = TicketsMeta::::get(); // Optionally finish sorting if tickets_metadata.segments_count != 0 { Self::sort_tickets(tickets_metadata.segments_count, epoch_tag, &mut tickets_metadata); } - // Clear the prev (equal to the next) epoch tickets counter. + + // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. + // Ids are left since are just cyclically overwritten on-the-go. let next_epoch_tag = epoch_tag ^ 1; - tickets_metadata.tickets_count[next_epoch_tag as usize] = 0; - TicketsMeta::::set(tickets_metadata); + let prev_epoch_tickets_count = &mut tickets_metadata.tickets_count[next_epoch_tag as usize]; + if *prev_epoch_tickets_count != 0 { + (0..*prev_epoch_tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((next_epoch_tag, idx)) { + TicketsData::::remove(id); + } + }); + *prev_epoch_tickets_count = 0; + TicketsMeta::::set(tickets_metadata); + } } /// Call this function on epoch change to update the randomness. @@ -764,7 +816,7 @@ impl Pallet { Epoch { epoch_idx, start_slot, config } } - /// Current epoch configuration. + /// Next epoch configuration. 
pub fn next_epoch() -> Epoch { let config = SassafrasConfiguration { slot_duration: T::SlotDuration::get(), @@ -851,62 +903,66 @@ impl Pallet { /// Refer to the `slot_ticket_id` documentation for the slot-ticket association /// criteria. pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { - Self::slot_ticket_id(slot).map(|id| (id, TicketsData::::get(id))) + Self::slot_ticket_id(slot).and_then(|id| TicketsData::::get(id).map(|body| (id, body))) } // Lexicographically sort the tickets who belongs to the next epoch. // - // Tickets are fetched from at most `max_iter` segments received via the `submit_tickets` - // extrinsic. + // Tickets are fetched from at most `max_segments` segments. // // The resulting sorted vector is optionally truncated to contain at most `MaxTickets` // entries. If all the segments were consumed then the sorted vector is saved as the // next epoch tickets, else it is saved to be used by next calls to this function. - fn sort_tickets(max_iter: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { - let max_iter = max_iter.min(metadata.segments_count); + fn sort_tickets(mut max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { + max_segments = max_segments.min(metadata.segments_count); let max_tickets = T::MaxTickets::get() as usize; // Fetch the sorted result (if any). - let mut new_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); + let mut sorted_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); - let mut require_sort = max_iter != 0; + let mut require_sort = max_segments != 0; - let mut sup = if new_segment.len() >= max_tickets { - new_segment[new_segment.len() - 1] - } else { - TicketId::MAX - }; + // There is an upper bound to check only if we already sorted the max number + // of allowed tickets. + let mut upper_bound = *sorted_segment.get(max_tickets).unwrap_or(&TicketId::MAX); // Consume at most `max_iter` segments. 
- for _ in 0..max_iter { + // During the process remove every stale ticket from `TicketsData` storage. + for _ in 0..max_segments { metadata.segments_count -= 1; let segment = NextTicketsSegments::::take(metadata.segments_count); // Merge only elements below the current sorted segment sup. - segment.into_iter().filter(|t| t < &sup).for_each(|t| new_segment.push(t)); - if new_segment.len() > max_tickets { + segment.iter().for_each(|id| { + if id < &upper_bound { + sorted_segment.push(*id); + } else { + TicketsData::::remove(id); + } + }); + if sorted_segment.len() > max_tickets { require_sort = false; - // Sort and truncnate the result - new_segment.sort_unstable(); - new_segment[max_tickets..].iter().for_each(|id| TicketsData::::remove(id)); - new_segment.truncate(max_tickets); - sup = new_segment[max_tickets - 1]; + // Sort and truncate good tickets. + sorted_segment.sort_unstable(); + sorted_segment[max_tickets..].iter().for_each(|id| TicketsData::::remove(id)); + sorted_segment.truncate(max_tickets); + upper_bound = sorted_segment[max_tickets - 1]; } } if require_sort { - new_segment.sort_unstable(); + sorted_segment.sort_unstable(); } if metadata.segments_count == 0 { - // Sort is over, write to next epoch map. - new_segment.iter().enumerate().for_each(|(i, id)| { + // Sorting is over, write to next epoch map. + sorted_segment.iter().enumerate().for_each(|(i, id)| { TicketsIds::::insert((epoch_tag, i as u32), id); }); - metadata.tickets_count[epoch_tag as usize] = new_segment.len() as u32; + metadata.tickets_count[epoch_tag as usize] = sorted_segment.len() as u32; } else { // Keep the partial result for next calls. - NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(new_segment)); + NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(sorted_segment)); } } @@ -934,7 +990,7 @@ impl Pallet { /// /// Unsigned extrinsic is created with a call to `report_equivocation_unsigned`. 
pub fn submit_unsigned_equivocation_report( - equivocation_proof: EquivocationProof, + equivocation_proof: EquivocationProof>, //key_owner_proof: T::KeyOwnerProof, ) -> bool { let call = Call::report_equivocation_unsigned { @@ -956,7 +1012,7 @@ impl Pallet { pub trait EpochChangeTrigger { /// Trigger an epoch change, if any should take place. This should be called /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + fn trigger(now: BlockNumberFor); } /// A type signifying to Sassafras that an external trigger for epoch changes @@ -964,7 +1020,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. + fn trigger(_: BlockNumberFor) {} // nothing - trigger is external. } /// A type signifying to Sassafras that it should perform epoch changes with an internal @@ -972,7 +1028,7 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { + fn trigger(now: BlockNumberFor) { if >::should_end_epoch(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index e3164f4132a3a..6d03bb935f964 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -17,53 +17,46 @@ //! Test utilities for Sassafras pallet. 
-use crate::{self as pallet_sassafras, SameAuthoritiesForever}; +use crate::{self as pallet_sassafras, SameAuthoritiesForever, *}; -use frame_support::traits::{ConstU32, ConstU64, GenesisBuild, OnFinalize, OnInitialize}; +use frame_support::traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ digests::PreDigest, AuthorityIndex, AuthorityPair, RingProver, SassafrasEpochConfiguration, - Slot, TicketBody, TicketEnvelope, VrfSignature, + Slot, TicketBody, TicketEnvelope, TicketId, VrfSignature, }; use sp_core::{ - crypto::{Pair, VrfSecret}, + crypto::{Pair, VrfSecret, Wraps}, H256, U256, }; use sp_runtime::{ testing::{Digest, DigestItem, Header, TestXt}, traits::IdentityLookup, + BuildStorage, }; const SLOT_DURATION: u64 = 1000; const EPOCH_DURATION: u64 = 10; const MAX_TICKETS: u32 = 6; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -type DummyValidatorId = u64; - -type AccountData = u128; - impl frame_system::Config for Test { - type RuntimeEvent = RuntimeEvent; type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; - type Index = u64; - type BlockNumber = u64; + type Nonce = u64; type RuntimeCall = RuntimeCall; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; - type AccountId = DummyValidatorId; + type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Block = frame_system::mocking::MockBlock; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; - type AccountData = AccountData; + type AccountData = u128; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); @@ -96,10 +89,7 @@ impl pallet_sassafras::Config for Test { } frame_support::construct_runtime!( - pub enum Test where - Block = Block, - 
NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, + pub enum Test { System: frame_system, Sassafras: pallet_sassafras, @@ -115,49 +105,57 @@ pub const TEST_EPOCH_CONFIGURATION: SassafrasEpochConfiguration = /// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { - new_test_ext_with_pairs(authorities_len).1 + new_test_ext_with_pairs(authorities_len, false).1 } /// Build and returns test storage externalities and authority set pairs used /// by Sassafras genesis configuration. pub fn new_test_ext_with_pairs( authorities_len: usize, + with_ring_context: bool, ) -> (Vec, sp_io::TestExternalities) { + // @davxy temporary logging facility + // env_logger::init(); + let pairs = (0..authorities_len) .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) .collect::>(); let authorities = pairs.iter().map(|p| p.public()).collect(); - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let config = - pallet_sassafras::GenesisConfig { authorities, epoch_config: TEST_EPOCH_CONFIGURATION }; - >::assimilate_storage( - &config, - &mut storage, - ) + pallet_sassafras::GenesisConfig:: { + authorities, + epoch_config: TEST_EPOCH_CONFIGURATION, + _phantom: sp_std::marker::PhantomData, + } + .assimilate_storage(&mut storage) .unwrap(); - (pairs, storage.into()) + let mut ext: sp_io::TestExternalities = storage.into(); + + if with_ring_context { + ext.execute_with(|| { + log::debug!("Building new testing ring context"); + let ring_ctx = RingContext::new_testing(); + RingVrfContext::::set(Some(ring_ctx.clone())); + }); + } + + (pairs, ext) } fn make_ticket_with_prover( - slot: Slot, attempt: u32, pair: &AuthorityPair, prover: &RingProver, ) -> TicketEnvelope { - println!("ATTEMPT: {}", attempt); - let mut epoch = Sassafras::epoch_index(); - let mut randomness = Sassafras::randomness(); + 
log::debug!("attempt: {}", attempt); - // Check if epoch is going to change on initialization - let epoch_start = Sassafras::current_epoch_start(); - if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { - epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); - randomness = crate::NextRandomness::::get(); - } + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); let body = TicketBody { attempt_idx: attempt, erased_public: [0; 32] }; @@ -169,8 +167,7 @@ fn make_ticket_with_prover( let ring_signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); // Ticket-id can be generated via vrf-preout. - // We don't care that much about the value here. - + // We don't care that much about its value here. TicketEnvelope { body, ring_signature } } @@ -191,24 +188,83 @@ pub fn make_prover(pair: &AuthorityPair) -> RingProver { }) .collect(); - println!("Make prover"); + log::debug!("Building prover. Ring size: {}", pks.len()); let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); - println!("Done"); + log::debug!("Done"); prover } +pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + let body = TicketBody { attempt_idx, erased_public: [0; 32] }; + + let input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt_idx, epoch); + let output = pair.as_inner_ref().vrf_output(&input); + + let id = sp_consensus_sassafras::ticket_id(&input, &output); + + (id, body) +} + +pub fn make_ticket_bodies(number: u32, pair: &AuthorityPair) -> Vec<(TicketId, TicketBody)> { + (0..number).into_iter().map(|i| make_ticket_body(i, pair)).collect() +} + /// Construct at most `attempts` tickets envelopes for the given `slot`. 
/// TODO-SASS-P3: filter out invalid tickets according to test threshold. /// E.g. by passing an optional threshold -pub fn make_tickets(slot: Slot, attempts: u32, pair: &AuthorityPair) -> Vec { +pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { let prover = make_prover(pair); (0..attempts) .into_iter() - .map(|attempt| make_ticket_with_prover(slot, attempt, pair, &prover)) + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) .collect() } +/// Persist the given tickets in `segments_count` separated segments by appending +/// them to the storage segments list. +/// +/// If segments_count > tickets.len() => segments_count = tickets.len() +pub fn persist_next_epoch_tickets_as_segments( + tickets: &[(TicketId, TicketBody)], + mut segments_count: usize, +) { + if segments_count > tickets.len() { + segments_count = tickets.len(); + } + let segment_len = tickets.len() / segments_count; + + // Update metadata + let mut meta = TicketsMeta::::get(); + meta.segments_count += segments_count as u32; + TicketsMeta::::set(meta); + + for i in 0..segments_count { + let segment: Vec = tickets[i * segment_len..(i + 1) * segment_len] + .iter() + .map(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + *id + }) + .collect(); + let segment = BoundedVec::truncate_from(segment); + NextTicketsSegments::::insert(i as u32, segment); + } +} + +pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { + persist_next_epoch_tickets_as_segments(tickets, 1); + // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. 
+ let next_epoch = Sassafras::next_epoch(); + assert_eq!(TicketsMeta::::get().segments_count, 1); + Sassafras::slot_ticket(next_epoch.start_slot).unwrap(); + assert_eq!(TicketsMeta::::get().segments_count, 0); +} + fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { let mut epoch = Sassafras::epoch_index(); let mut randomness = Sassafras::randomness(); diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs index 6f16941c99e80..b70748a7f153c 100644 --- a/frame/sassafras/src/session.rs +++ b/frame/sassafras/src/session.rs @@ -22,8 +22,8 @@ use frame_support::traits::{EstimateNextSessionRotation, Hooks, OneSessionHandle use pallet_session::ShouldEndSession; use sp_runtime::{traits::SaturatedConversion, Permill}; -impl ShouldEndSession for Pallet { - fn should_end_session(now: T::BlockNumber) -> bool { +impl ShouldEndSession> for Pallet { + fn should_end_session(now: BlockNumberFor) -> bool { // It might be (and it is in current implementation) that session module is calling // `should_end_session` from it's own `on_initialize` handler, in which case it's // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we @@ -74,12 +74,12 @@ impl OneSessionHandler for Pallet { } } -impl EstimateNextSessionRotation for Pallet { - fn average_session_length() -> T::BlockNumber { +impl EstimateNextSessionRotation> for Pallet { + fn average_session_length() -> BlockNumberFor { T::EpochDuration::get().saturated_into() } - fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + fn estimate_current_session_progress(_now: BlockNumberFor) -> (Option, Weight) { let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; let progress = Permill::from_rational(*elapsed, T::EpochDuration::get()); @@ -100,11 +100,13 @@ impl EstimateNextSessionRotation for Pallet { // // This implementation is linked to how [`should_session_change`] is working. 
This might need // to be updated accordingly, if the underlying mechanics of slot and epochs change. - fn estimate_next_session_rotation(now: T::BlockNumber) -> (Option, Weight) { + fn estimate_next_session_rotation( + now: BlockNumberFor, + ) -> (Option>, Weight) { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); let upper_bound = next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. - let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + let blocks_remaining: BlockNumberFor = slots_remaining.saturated_into(); now.saturating_add(blocks_remaining) }); diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 4e0bec18de3c2..674e30d121c7f 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -39,10 +39,11 @@ fn genesis_values_assumptions_check() { }); } +// Tests if the sorted tickets are assigned to each slot outside-in. 
#[test] -fn slot_ticket_id_fetch() { +fn slot_ticket_id_outside_in_fetch() { let genesis_slot = Slot::from(100); - let max_tickets: u32 = ::MaxTickets::get(); + let max_tickets: u32 = ::MaxTickets::get(); assert_eq!(max_tickets, 6); // Current epoch tickets @@ -114,7 +115,7 @@ fn slot_ticket_id_fetch() { #[test] fn on_first_block_after_genesis() { - let (pairs, mut ext) = new_test_ext_with_pairs(4); + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); ext.execute_with(|| { let start_slot = Slot::from(100); @@ -149,7 +150,7 @@ fn on_first_block_after_genesis() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("eb169de47822691578f74204ace5bc57c38f86f97e15a8abf71114541e7ca9e8"), + h2b("7ca54f761c6ec87503367cb3418740b8bab9796f861b9b1cb4945344bd5e87ca"), ); // Header data check @@ -172,13 +173,12 @@ fn on_first_block_after_genesis() { #[test] fn on_normal_block() { - let (pairs, mut ext) = new_test_ext_with_pairs(4); + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = Slot::from(100); + let start_block = 1; + let end_block = start_block + 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - let end_block = start_block + 1; - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We don't want to trigger an epoch change in this test. 
@@ -201,7 +201,7 @@ fn on_normal_block() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("eb169de47822691578f74204ace5bc57c38f86f97e15a8abf71114541e7ca9e8"), + h2b("7ca54f761c6ec87503367cb3418740b8bab9796f861b9b1cb4945344bd5e87ca"), ); let header = finalize_block(end_block); @@ -219,7 +219,7 @@ fn on_normal_block() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("c5e06d78bf5351b3a740c6838976e571ee14c595a206278f3f4ce0157f538318"), + h2b("ec9ccd9bf272de069b0e51089e7182008ed7edef3ed878bb703e9e8945ead5ed"), ); // Header data check @@ -230,8 +230,8 @@ fn on_normal_block() { } #[test] -fn produce_epoch_change_digest() { - let (pairs, mut ext) = new_test_ext_with_pairs(4); +fn produce_epoch_change_digest_no_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); ext.execute_with(|| { let start_slot = Slot::from(100); @@ -257,12 +257,12 @@ fn produce_epoch_change_digest() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("a7abdd705eb72383f60f6f093dea4bbfb65a1992099b4928ca30076f71a73682"), + h2b("85b976e3d66ecba38053d508dbccf1a17b36958fd2c2888669e439671f9b4e09"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("a9d8fc258ba0274d7815664b4e153904c44d2e850e98cffc0ba03ea018611348"), + h2b("f98d9bcc7f368068c93a68f8c1eb016a15612916bda89443eda9921b8402af4c"), ); let header = finalize_block(end_block); @@ -279,12 +279,12 @@ fn produce_epoch_change_digest() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("a7abdd705eb72383f60f6f093dea4bbfb65a1992099b4928ca30076f71a73682"), + h2b("85b976e3d66ecba38053d508dbccf1a17b36958fd2c2888669e439671f9b4e09"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("53b4e087baba183a2973552ba57b6c8f489959c8e5f838d59884d37c6d494e2f"), 
+ h2b("7e3439ef345329ca6cc0e0b1f31cfb28b462540db2258e5c7c61e4d1f366013b"), ); // Header data check @@ -306,7 +306,7 @@ fn produce_epoch_change_digest() { #[test] fn produce_epoch_change_digest_with_config() { - let (pairs, mut ext) = new_test_ext_with_pairs(4); + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); ext.execute_with(|| { let start_slot = Slot::from(100); @@ -345,89 +345,79 @@ fn produce_epoch_change_digest_with_config() { // TODO davxy: create a read_tickets method which reads pre-constructed good tickets // from a file. Creating this stuff "on-the-fly" is just too much expensive +// +// A valid ring-context is required for this test since we are passing though the +// `submit_ticket` call which tests for ticket validity. #[test] -fn submit_segments_works() { - let (pairs, mut ext) = new_test_ext_with_pairs(1); +fn submit_tickets_with_ring_proof_check_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(10, true); let pair = &pairs[0]; - // We're going to generate 14 segments. 
let segments_count = 3; - let ring_ctx = RingVrfContext::new_testing(); - ext.execute_with(|| { let start_slot = Slot::from(100); let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); - - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + let attempts_number = segments_count * max_tickets; // Tweak the epoch config to discard some of the tickets let mut config = EpochConfig::::get(); - config.redundancy_factor = 2; + config.redundancy_factor = 7; + config.attempts_number = attempts_number; EpochConfig::::set(config); - RingContext::::set(Some(ring_ctx.clone())); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Check state before tickets submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, + ); // Populate the segments via the `submit_tickets` - let tickets = make_tickets(start_slot + 1, segments_count * max_tickets, pair); + let tickets = make_tickets(attempts_number, pair); let segment_len = tickets.len() / segments_count as usize; for i in 0..segments_count as usize { + println!("Submit tickets"); let segment = tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); } + // Check state after submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count, tickets_count: [0, 0] }, + ); + finalize_block(start_block); // Check against the expected results given the known inputs - let meta = TicketsMeta::::get(); - assert_eq!(meta.segments_count, segments_count); - assert_eq!(meta.tickets_count, [0, 0]); - let seg = NextTicketsSegments::::get(0); - assert_eq!(seg.len(), 3); + assert_eq!(NextTicketsSegments::::get(0).len(), 2); let seg = NextTicketsSegments::::get(1); - assert_eq!(seg.len(), 5); + assert_eq!(seg.len(), 3); let seg = NextTicketsSegments::::get(2); - assert_eq!(seg.len(), 5); + assert_eq!(seg.len(), 2); }) } #[test] 
-fn segments_incremental_sortition_works() { - let (pairs, mut ext) = new_test_ext_with_pairs(1); +fn segments_incremental_sort_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); let pair = &pairs[0]; let segments_count = 14; - - let ring_ctx = RingVrfContext::new_testing(); + let start_slot = Slot::from(100); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); - - RingContext::::set(Some(ring_ctx.clone())); + let tickets_count = segments_count * max_tickets; initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - // Manually populate the segments to fool the threshold check - let tickets = make_tickets(start_slot + 1, segments_count * max_tickets, pair); - let segment_len = tickets.len() / segments_count as usize; - - for i in 0..segments_count as usize { - let segment: Vec = tickets[i * segment_len..(i + 1) * segment_len] - .iter() - .enumerate() - .map(|(j, ticket)| { - let ticket_id = (i * segment_len + j) as TicketId; - TicketsData::::set(ticket_id, ticket.body.clone()); - ticket_id - }) - .collect(); - let segment = BoundedVec::truncate_from(segment); - NextTicketsSegments::::insert(i as u32, segment); - } - let meta = TicketsMetadata { segments_count, tickets_count: [0, 0] }; - TicketsMeta::::set(meta); + // Manually populate the segments to skip the threshold check + let mut tickets = make_ticket_bodies(tickets_count, pair); + persist_next_epoch_tickets_as_segments(&tickets, segments_count as usize); let epoch_duration: u64 = ::EpochDuration::get(); @@ -464,13 +454,28 @@ fn segments_incremental_sortition_works() { let header = finalize_block(half_epoch_block + 4); - // Sort should be finished. - // Check that next epoch tickets count have the correct value (6). - // Bigger values were discarded during sortition. + // Sort should be finished now. + // Check that next epoch tickets count have the correct value. 
+ // Bigger ticket ids were discarded during sortition. let meta = TicketsMeta::::get(); assert_eq!(meta.segments_count, 0); - assert_eq!(meta.tickets_count, [0, 6]); + assert_eq!(meta.tickets_count, [0, max_tickets]); assert_eq!(header.digest.logs.len(), 1); + // No tickets for the current epoch + assert_eq!(TicketsIds::::get((0, 0)), None); + + // Check persistence of good tickets + tickets.sort_by_key(|t| t.0); + (0..max_tickets as usize).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), tickets[i]); + }); + // Check removal of bad tickets + (max_tickets as usize..tickets.len()).into_iter().for_each(|i| { + assert!(TicketsIds::::get((1, i as u32)).is_none()); + assert!(TicketsData::::get(tickets[i].0).is_none()); + }); // The next block will be the first produced on the new epoch, // At this point the tickets are found already sorted and ready to be used. @@ -484,20 +489,14 @@ fn segments_incremental_sortition_works() { } #[test] -fn submit_enact_claim_tickets() { - use sp_core::crypto::VrfSecret; - - let (pairs, mut ext) = new_test_ext_with_pairs(4); - - let ring_ctx = RingVrfContext::new_testing(); +fn tickets_fetch_works_after_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; let max_tickets: u32 = ::MaxTickets::get(); - let pair = &pairs[0]; - - RingContext::::set(Some(ring_ctx.clone())); initialize_block(start_block, start_slot, Default::default(), pair); @@ -506,56 +505,25 @@ fn submit_enact_claim_tickets() { assert!(epoch_duration > 2); progress_to_block(2, &pairs[0]).unwrap(); - // // Check state before tickets submission - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, - ); - - // Submit authoring tickets in three 
different segments. - let tickets = make_tickets(start_slot + 1, 3 * max_tickets, pair); - let tickets0 = tickets[0..6].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(RuntimeOrigin::none(), tickets0).unwrap(); - let tickets1 = tickets[6..12].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(RuntimeOrigin::none(), tickets1).unwrap(); - let tickets2 = tickets[12..18].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(RuntimeOrigin::none(), tickets2).unwrap(); - - // Check state after submit - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { segments_count: 3, tickets_count: [0, 0] }, - ); + // Persist tickets as three different segments. + let tickets = make_ticket_bodies(3 * max_tickets, pair); + persist_next_epoch_tickets_as_segments(&tickets, 3); // Progress up to the last epoch slot (do not enact epoch change) progress_to_block(epoch_duration, &pairs[0]).unwrap(); - // At this point next tickets should have been sorted - // Check state after submit + // At this point next tickets should have been sorted and ready to be used assert_eq!( TicketsMeta::::get(), TicketsMetadata { segments_count: 0, tickets_count: [0, 6] }, ); // Compute and sort the tickets ids (aka tickets scores) - let mut expected_ids: Vec<_> = tickets - .iter() - .map(|ticket| { - let epoch_idx = Sassafras::epoch_index() + 1; - let randomness = Sassafras::next_randomness(); - let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( - &randomness, - ticket.body.attempt_idx, - epoch_idx, - ); - let vrf_output = pair.as_ref().vrf_output(&vrf_input); - sp_consensus_sassafras::ticket_id(&vrf_input, &vrf_output) - }) - .collect(); + let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect(); expected_ids.sort(); expected_ids.truncate(max_tickets as usize); - // Check if we can claim next epoch tickets in outside-in fashion. + // Check if we can fetch next epoch tickets ids (outside-in). 
let slot = Sassafras::current_slot(); assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); @@ -575,6 +543,7 @@ fn submit_enact_claim_tickets() { assert_eq!(meta.segments_count, 0); assert_eq!(meta.tickets_count, [0, 6]); + // Check if we can fetch thisepoch tickets ids (outside-in). let slot = Sassafras::current_slot(); assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); @@ -590,34 +559,22 @@ fn submit_enact_claim_tickets() { #[test] fn block_allowed_to_skip_epochs() { - let (pairs, mut ext) = new_test_ext_with_pairs(4); - - let ring_ctx = RingVrfContext::new_testing(); + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; let epoch_duration: u64 = ::EpochDuration::get(); - RingContext::::set(Some(ring_ctx.clone())); - - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - - let tickets = make_tickets(start_slot + 1, 3, &pairs[0]); - Sassafras::submit_tickets( - RuntimeOrigin::none(), - BoundedVec::truncate_from(tickets.clone()), - ) - .unwrap(); + initialize_block(start_block, start_slot, Default::default(), pair); - // Force sortition of next tickets (enactment) by explicitly querying next epoch tickets. - assert_eq!(TicketsMeta::::get().segments_count, 1); - Sassafras::slot_ticket(start_slot + epoch_duration).unwrap(); - assert_eq!(TicketsMeta::::get().segments_count, 0); + let tickets = make_ticket_bodies(3, pair); + persist_next_epoch_tickets(&tickets); let next_random = NextRandomness::::get(); - // We want to trigger a skip epoch in this test. + // We want to skip 2 epochs in this test. 
let offset = 3 * epoch_duration; go_to_block(start_block + offset, start_slot + offset, &pairs[0]); @@ -630,10 +587,79 @@ fn block_allowed_to_skip_epochs() { assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); assert_eq!(Sassafras::current_slot_index(), 0); - // Tickets were discarded + // Tickets data has been discarded let meta = TicketsMeta::::get(); assert_eq!(meta, TicketsMetadata::default()); + + tickets.iter().for_each(|(id, _)| { + let data = TicketsData::::get(id); + assert!(data.is_none()); + }); // We used the last known next epoch randomness as a fallback assert_eq!(next_random, Sassafras::randomness()); }); } + +#[test] +fn obsolete_tickets_are_removed_on_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_duration: u64 = ::EpochDuration::get(); + + initialize_block(start_block, start_slot, Default::default(), pair); + + let tickets = make_ticket_bodies(10, pair); + let mut epoch1_tickets = tickets[..4].to_vec(); + let mut epoch2_tickets = tickets[4..].to_vec(); + + // Persist some tickets for next epoch (N) + persist_next_epoch_tickets(&epoch1_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + // Check next epoch tickets presence + epoch1_tickets.sort_by_key(|t| t.0); + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch1_tickets[i]); + }); + + // Advance one epoch to enact the tickets + go_to_block(start_block + epoch_duration, start_slot + epoch_duration, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + + // Persist some tickets for next epoch (N+1) + persist_next_epoch_tickets(&epoch2_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 4]); + epoch2_tickets.sort_by_key(|t| t.0); + // Check for this epoch and next 
epoch tickets presence + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch1_tickets[i]); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + + // Advance to epoch 2 and check for cleanup + + go_to_block(start_block + 2 * epoch_duration, start_slot + 2 * epoch_duration, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 0]); + + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + assert!(TicketsData::::get(id).is_none()); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + }) +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 45e585c72e93c..c3df3ab675bcf 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -33,7 +33,7 @@ use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; pub use sp_core::bandersnatch::{ - ring_vrf::{RingContext, RingProver, RingVerifier}, + ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, }; diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index c8f95b165c6d9..6ba2a34a99241 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -17,14 +17,10 @@ //! Primitives related to tickets. 
-use super::{Randomness, SASSAFRAS_ENGINE_ID}; +use crate::{Randomness, RingVrfSignature, VrfInput, VrfOutput, VrfSignData, SASSAFRAS_ENGINE_ID}; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_consensus_slots::Slot; -use sp_core::bandersnatch::{ - ring_vrf::RingVrfSignature, - vrf::{VrfInput, VrfOutput, VrfSignData}, -}; /// Ticket identifier. /// @@ -33,7 +29,7 @@ use sp_core::bandersnatch::{ pub type TicketId = u128; /// Ticket data persisted on-chain. -#[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketBody { /// Attempt index. pub attempt_idx: u32, diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index c3ba7f41058e9..d87b3ee232df9 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -31,7 +31,7 @@ use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretString use bandersnatch_vrfs::CanonicalSerialize; #[cfg(feature = "full_crypto")] use bandersnatch_vrfs::SecretKey; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::PassByInner; @@ -294,7 +294,7 @@ impl TraitPair for Pair { fn verify>(signature: &Signature, data: M, public: &Public) -> bool { let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data.as_ref()], None); let signature = - vrf::VrfSignature { signature: *signature, vrf_outputs: vrf::VrfIosVec::default() }; + vrf::VrfSignature { signature: *signature, outputs: vrf::VrfIosVec::default() }; public.vrf_verify(&data, &signature) } @@ -463,7 +463,7 @@ pub mod vrf { #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { /// VRF (pre)outputs. - pub vrf_outputs: VrfIosVec, + pub outputs: VrfIosVec, /// VRF signature. 
pub signature: Signature, } @@ -506,12 +506,12 @@ pub mod vrf { impl VrfPublic for Public { fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool { const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); - let preouts_len = signature.vrf_outputs.len(); - if preouts_len != data.vrf_inputs.len() { + let outputs_len = signature.outputs.len(); + if outputs_len != data.vrf_inputs.len() { return false } // Workaround to overcome backend signature generic over the number of IOs. - match preouts_len { + match outputs_len { 0 => self.vrf_verify_gen::<0>(data, signature), 1 => self.vrf_verify_gen::<1>(data, signature), 2 => self.vrf_verify_gen::<2>(data, signature), @@ -541,7 +541,7 @@ pub mod vrf { let outputs: Vec<_> = signature.preoutputs.into_iter().map(VrfOutput).collect(); let outputs = VrfIosVec::truncate_from(outputs); - VrfSignature { signature: Signature(sign_bytes), vrf_outputs: outputs } + VrfSignature { signature: Signature(sign_bytes), outputs } } /// Generate an arbitrary number of bytes from the given `context` and VRF `input`. 
@@ -567,7 +567,7 @@ pub mod vrf { }; let Ok(preouts) = signature - .vrf_outputs + .outputs .iter() .map(|o| o.0.clone()) .collect::>() @@ -675,6 +675,8 @@ pub mod ring_vrf { } } + impl EncodeLike for RingContext {} + impl MaxEncodedLen for RingContext { fn max_encoded_len() -> usize { <[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len() @@ -910,11 +912,11 @@ mod tests { let signature = pair.vrf_sign(&data); let o10 = pair.make_bytes::<32>(b"ctx1", &i1); - let o11 = signature.vrf_outputs[0].make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); assert_eq!(o10, o11); let o20 = pair.make_bytes::<48>(b"ctx2", &i2); - let o21 = signature.vrf_outputs[1].make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); assert_eq!(o20, o21); } @@ -993,6 +995,35 @@ mod tests { assert!(!signature.verify(&data, &verifier)); } + #[test] + fn ring_vrf_make_bytes_matches() { + let ring_ctx = RingContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + + // Just pick one index to patch with the actual public key + let prover_idx = 3; + pks[prover_idx] = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let signature = pair.ring_vrf_sign(&data, &prover); + + let o10 = pair.make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); + assert_eq!(o10, o11); + + let o20 = pair.make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); + assert_eq!(o20, o21); + } + #[test] fn encode_decode_ring_vrf_signature() { let ring_ctx = RingContext::new_testing(); From 
677a095572e095d2f6035753a8d92e44a1b6ce99 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 11 Aug 2023 15:04:22 +0200 Subject: [PATCH 47/62] Begin fix of client --- client/consensus/sassafras/src/authorship.rs | 4 +-- client/consensus/sassafras/src/lib.rs | 2 +- .../consensus/sassafras/src/verification.rs | 8 +++--- primitives/consensus/sassafras/src/ticket.rs | 1 + test-utils/runtime/src/genesismap.rs | 26 ++++++++++++------- 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index c2a38e1e58e33..cafe60af572b3 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -24,7 +24,7 @@ use sp_consensus_sassafras::{ digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, TicketEnvelope, TicketId, }; -use sp_core::{bandersnatch::ring_vrf::RingVrfContext, ed25519, twox_64, ByteArray}; +use sp_core::{bandersnatch::ring_vrf::RingContext, ed25519, twox_64, ByteArray}; use std::pin::Pin; /// Get secondary authority index for the given epoch and slot. 
@@ -97,7 +97,7 @@ pub(crate) fn claim_slot( fn generate_epoch_tickets( epoch: &mut Epoch, keystore: &KeystorePtr, - ring_ctx: &RingVrfContext, + ring_ctx: &RingContext, ) -> Vec { let config = &epoch.config; let max_attempts = config.threshold_params.attempts_number; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index aafea3acbfcec..a34746af78d1c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -64,7 +64,7 @@ use sp_consensus::{ BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; use sp_consensus_slots::Slot; -use sp_core::{ExecutionContext, Pair}; +use sp_core::Pair; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::KeystorePtr; use sp_runtime::{ diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 4206ef195c5d0..c16df446ab4ca 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -77,7 +77,7 @@ fn check_header( } let Some(authority_id) = config.authorities.get(pre_digest.authority_idx as usize) else { - return Err(sassafras_err(Error::SlotAuthorNotFound)); + return Err(sassafras_err(Error::SlotAuthorNotFound)) }; // Check header signature (aka the Seal) @@ -188,12 +188,11 @@ where at_hash: Block::Hash, inherent_data: InherentData, create_inherent_data_providers: CIDP::InherentDataProviders, - execution_context: ExecutionContext, ) -> Result<(), Error> { let inherent_res = self .client .runtime_api() - .check_inherents_with_context(at_hash, execution_context, block, inherent_data) + .check_inherents(at_hash, block, inherent_data) .map_err(Error::RuntimeApi)?; if !inherent_res.ok() { @@ -415,7 +414,7 @@ where let new_block = Block::new(pre_header.clone(), inner_body); if !block.state_action.skip_execution_checks() { - // TODO-SASS-P3 :??? 
DOC + // TODO-SASS-P3 : @davxy??? DOC let mut inherent_data = create_inherent_data_providers .create_inherent_data() .await @@ -426,7 +425,6 @@ where parent_hash, inherent_data, create_inherent_data_providers, - block.origin.into(), ) .await?; } diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 6ba2a34a99241..842067d42e92d 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -21,6 +21,7 @@ use crate::{Randomness, RingVrfSignature, VrfInput, VrfOutput, VrfSignData, SASS use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_consensus_slots::Slot; +use sp_std::vec::Vec; /// Ticket identifier. /// diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 136c511f8ed66..580fa6a923ed6 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -107,18 +107,26 @@ impl GenesisStorageBuilder { self } -<<<<<<< HEAD - /// Builds the `GenesisConfig` and returns its storage. - pub fn build(self) -> Storage { - let authorities_sr25519: Vec = - self.authorities.clone().into_iter().map(|id| id.into()).collect(); - - let authorities_bandersnatch: Vec = self -======= + // <<<<<<< HEAD + // /// Builds the `GenesisConfig` and returns its storage. 
+ // pub fn build(self) -> Storage { + // let authorities_sr25519: Vec = + // self.authorities.clone().into_iter().map(|id| id.into()).collect(); + + // let authorities_bandersnatch: Vec = self + // ======= /// A `RuntimeGenesisConfig` from internal configuration pub fn genesis_config(&self) -> RuntimeGenesisConfig { let authorities_sr25519: Vec<_> = self ->>>>>>> master + .authorities + .iter() + .map(|id| { + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + sp_keyring::Sr25519Keyring::from_str(&seed).unwrap().into() + }) + .collect(); + let authorities_bandersnatch: Vec = self .authorities .iter() .map(|id| { From 049ac84f54ce98ea5a4ad8cb222b17b6cc72bcf0 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 11 Aug 2023 17:35:21 +0200 Subject: [PATCH 48/62] Substrate fix test utils --- client/service/Cargo.toml | 2 +- primitives/keyring/src/sr25519.rs | 16 ++++++++-------- primitives/keystore/src/lib.rs | 1 + test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/src/genesismap.rs | 12 +++--------- test-utils/runtime/src/lib.rs | 15 ++++++++++++--- .../src/test_json/default_genesis_config.json | 11 +++++++++++ .../default_genesis_config_incomplete.json | 11 +++++++++++ .../default_genesis_config_invalid.json | 11 +++++++++++ 9 files changed, 59 insertions(+), 22 deletions(-) diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 9ad2fcf778f2d..643a2e20778c0 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -36,7 +36,7 @@ exit-future = "0.2.0" pin-project = "1.0.12" serde = "1.0.163" serde_json = "1.0.85" -sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sc-keystore = { version = "4.0.0-dev", path = "../keystore", features = ["bandersnatch-experimental"] } sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-trie = { version = "22.0.0", path = "../../primitives/trie" } sp-externalities = { version = "0.19.0", path = 
"../../primitives/externalities" } diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index c738cfdc59d9e..c990b7b796b5d 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -140,14 +140,14 @@ impl std::str::FromStr for Keyring { fn from_str(s: &str) -> Result::Err> { match s { - "alice" => Ok(Keyring::Alice), - "bob" => Ok(Keyring::Bob), - "charlie" => Ok(Keyring::Charlie), - "dave" => Ok(Keyring::Dave), - "eve" => Ok(Keyring::Eve), - "ferdie" => Ok(Keyring::Ferdie), - "one" => Ok(Keyring::One), - "two" => Ok(Keyring::Two), + "Alice" => Ok(Keyring::Alice), + "Bob" => Ok(Keyring::Bob), + "Charlie" => Ok(Keyring::Charlie), + "Dave" => Ok(Keyring::Dave), + "Eve" => Ok(Keyring::Eve), + "Ferdie" => Ok(Keyring::Ferdie), + "One" => Ok(Keyring::One), + "Two" => Ok(Keyring::Two), _ => Err(ParseKeyringError), } } diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 82062fe7b40a7..b388362ecb898 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -17,6 +17,7 @@ //! Keystore traits +#[cfg(feature = "std")] pub mod testing; #[cfg(feature = "bandersnatch-experimental")] diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index c21d630f63f64..5b82f30e2bef7 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -22,7 +22,7 @@ sp-block-builder = { version = "4.0.0-dev", default-features = false, path = ".. 
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "24.0.0", optional = true, path = "../../primitives/keyring" } +sp-keyring = { version = "24.0.0", optional = true, path = "../../primitives/keyring", features = ["bandersnatch-experimental"] } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 580fa6a923ed6..c2a6698ee6265 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -107,22 +107,15 @@ impl GenesisStorageBuilder { self } - // <<<<<<< HEAD - // /// Builds the `GenesisConfig` and returns its storage. 
- // pub fn build(self) -> Storage { - // let authorities_sr25519: Vec = - // self.authorities.clone().into_iter().map(|id| id.into()).collect(); - - // let authorities_bandersnatch: Vec = self - // ======= /// A `RuntimeGenesisConfig` from internal configuration pub fn genesis_config(&self) -> RuntimeGenesisConfig { - let authorities_sr25519: Vec<_> = self + let authorities_sr25519: Vec = self .authorities .iter() .map(|id| { use std::str::FromStr; let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + println!(">>>>> {}", seed); sp_keyring::Sr25519Keyring::from_str(&seed).unwrap().into() }) .collect(); @@ -156,6 +149,7 @@ impl GenesisStorageBuilder { redundancy_factor: 1, attempts_number: 32, }, + ..Default::default() }, substrate_test: substrate_test_pallet::GenesisConfig { authorities: authorities_sr25519.clone(), diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index aea68a49660d9..4a9c2d0b9e701 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -694,7 +694,7 @@ impl_runtime_apis! 
{ } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn ring_context() -> Option { + fn ring_context() -> Option { Sassafras::ring_context() } @@ -1343,7 +1343,7 @@ mod tests { #[test] fn build_minimal_genesis_config_works() { sp_tracing::try_init_simple(); - let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor": 1,"attempts_number": 32}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; let mut t = BasicExternalities::new_empty(); executor_call(&mut t, "GenesisBuilder_build_config", &default_minimal_json.encode()) @@ -1386,6 +1386,15 @@ mod tests { "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", + + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", ].into_iter().map(String::from).collect::>(); expected.sort(); @@ -1400,7 +1409,7 @@ mod tests { let r = Vec::::decode(&mut &r[..]).unwrap(); let json = String::from_utf8(r.into()).expect("returned value is json. 
qed."); - let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor":0,"attempts_number":0}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; assert_eq!(expected.to_string(), json); } diff --git a/test-utils/runtime/src/test_json/default_genesis_config.json b/test-utils/runtime/src/test_json/default_genesis_config.json index b0218d417daa5..1d322237c33fe 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config.json +++ b/test-utils/runtime/src/test_json/default_genesis_config.json @@ -25,6 +25,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", diff --git a/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json index e25730ee11cf0..4965136fd1ec0 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json +++ b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json @@ -18,6 +18,17 @@ "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" ] }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "balances": { "balances": [ [ diff --git 
a/test-utils/runtime/src/test_json/default_genesis_config_invalid.json b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json index 00550efaeec9f..ae0c9f986c29e 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config_invalid.json +++ b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json @@ -25,6 +25,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", From 50ab0d0a7bb2ebf8f077b3ddcc358e8a192cb034 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 14:53:03 +0200 Subject: [PATCH 49/62] Fix sassafras node --- Cargo.lock | 2 + bin/node-sassafras/node/Cargo.toml | 4 +- bin/node-sassafras/node/src/chain_spec.rs | 3 + bin/node-sassafras/node/src/command.rs | 12 ++-- bin/node-sassafras/node/src/rpc.rs | 4 +- bin/node-sassafras/node/src/service.rs | 68 +++++++++++++++-------- bin/node-sassafras/runtime/Cargo.toml | 2 +- bin/node-sassafras/runtime/src/lib.rs | 28 ++++------ 8 files changed, 70 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 701de62cd1c56..d79eeb3430b91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5338,6 +5338,7 @@ dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", "frame-system", + "futures", "jsonrpsee", "node-sassafras-runtime", "pallet-transaction-payment", @@ -5351,6 +5352,7 @@ dependencies = [ "sc-executor", "sc-keystore", "sc-network", + "sc-offchain", "sc-rpc", "sc-rpc-api", "sc-service", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index f7226797957d0..cec3cd8dbb0a5 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ 
b/bin/node-sassafras/node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "node-sassafras" version = "0.3.3-dev" -authors = ["Parity Technologies "] +authors = ["Parity Technologies ", "Davide Galassi "] description = "Node testbed for Sassafras consensus." homepage = "https://substrate.io/" edition = "2021" @@ -17,6 +17,7 @@ name = "node-sassafras" [dependencies] clap = { version = "4.0.9", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"]} sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } sp-core = { version = "21.0.0", path = "../../../primitives/core" } @@ -27,6 +28,7 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" } sc-consensus-sassafras = { version = "0.3.3-dev", path = "../../../client/consensus/sassafras" } sp-consensus-sassafras = { version = "0.3.3-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 46b67d9306016..0cb9e31e9df4a 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -120,6 +120,7 @@ fn testnet_genesis( system: SystemConfig { // Add Wasm runtime to storage. code: wasm_binary.to_vec(), + ..Default::default() }, balances: BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. 
@@ -134,12 +135,14 @@ fn testnet_genesis( attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, }, + ..Default::default() }, grandpa: GrandpaConfig { #[cfg(feature = "use-session-pallet")] authorities: vec![], #[cfg(not(feature = "use-session-pallet"))] authorities: initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect(), + ..Default::default() }, sudo: SudoConfig { // Assign network admin rights. diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs index 4c37820b3c9bb..187b266f9c4d4 100644 --- a/bin/node-sassafras/node/src/command.rs +++ b/bin/node-sassafras/node/src/command.rs @@ -5,7 +5,7 @@ use crate::{ }; use frame_benchmarking_cli::BenchmarkCmd; use node_sassafras_runtime::Block; -use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; +use sc_cli::SubstrateCli; use sc_service::PartialComponents; impl SubstrateCli for Cli { @@ -30,7 +30,7 @@ impl SubstrateCli for Cli { } fn copyright_start_year() -> i32 { - 2022 + 2023 } fn load_spec(&self, id: &str) -> Result, String> { @@ -41,10 +41,6 @@ impl SubstrateCli for Cli { Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &node_sassafras_runtime::VERSION - } } /// Parse and run command line arguments @@ -120,10 +116,10 @@ pub fn run() -> sc_cli::Result<()> { ) } - cmd.run::(config) + cmd.run::(config) }, _ => { - println!("Not implemented..."); + eprintln!("Not implemented..."); Ok(()) }, } diff --git a/bin/node-sassafras/node/src/rpc.rs b/bin/node-sassafras/node/src/rpc.rs index 4964c5c15fc06..72c7b3d69ba12 100644 --- a/bin/node-sassafras/node/src/rpc.rs +++ b/bin/node-sassafras/node/src/rpc.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; -use node_sassafras_runtime::{opaque::Block, AccountId, Balance, Index}; +use node_sassafras_runtime::{opaque::Block, AccountId, Balance, Nonce}; use 
sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; @@ -34,7 +34,7 @@ where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata + 'static, C: Send + Sync + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 6a21e5c538df2..197850d84ab19 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -1,11 +1,13 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +use futures::FutureExt; use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::BlockBackend; +use sc_client_api::{Backend, BlockBackend}; use sc_consensus_grandpa::SharedVoterState; pub use sc_executor::NativeElseWasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use std::{sync::Arc, time::Duration}; // Our native executor instance. @@ -36,6 +38,10 @@ type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = sc_consensus_grandpa::GrandpaBlockImport; +/// The minimum period of blocks on which justifications will be +/// imported and generated. 
+const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + pub fn new_partial( config: &Configuration, ) -> Result< @@ -92,7 +98,8 @@ pub fn new_partial( let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( client.clone(), - &(client.clone() as Arc<_>), + GRANDPA_JUSTIFICATION_PERIOD, + &client, select_chain.clone(), telemetry.as_ref().map(|x| x.handle()), )?; @@ -184,11 +191,23 @@ pub fn new_full(config: Configuration) -> Result { })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), ); } @@ -268,29 +287,29 @@ pub fn new_full(config: Configuration) -> Result { ); } - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = role.is_authority().then(|| keystore_container.keystore()); - - let grandpa_config = sc_consensus_grandpa::Config { - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - local_role: role, - telemetry: telemetry.as_ref().map(|x| x.handle()), - protocol_name: grandpa_protocol_name, - }; - if enable_grandpa { + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. 
+ let keystore = role.is_authority().then(|| keystore_container.keystore()); + + let grandpa_config = sc_consensus_grandpa::Config { + gossip_duration: Duration::from_millis(333), + justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + // start the full GRANDPA voter // NOTE: non-authorities could run the GRANDPA observer protocol, but at // this point the full voter should provide better guarantees of block // and vote data availability than the observer. The observer has not // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. - let grandpa_config = sc_consensus_grandpa::GrandpaParams { + let grandpa_params = sc_consensus_grandpa::GrandpaParams { config: grandpa_config, link: grandpa_link, network, @@ -299,6 +318,7 @@ pub fn new_full(config: Configuration) -> Result { prometheus_registry, shared_voter_state: SharedVoterState::empty(), telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), }; // the GRANDPA voter task is considered infallible, i.e. 
@@ -306,7 +326,7 @@ pub fn new_full(config: Configuration) -> Result { task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", None, - sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?, + sc_consensus_grandpa::run_grandpa_voter(grandpa_params)?, ); } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index a5f9e53ac9dfa..d0c1e86ca3204 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "node-sassafras-runtime" version = "0.3.3-dev" -authors = ["Parity Technologies "] +authors = ["Parity Technologies ","Davide Galassi "] description = "Runtime testbed for Sassafras consensus." homepage = "https://substrate.io/" edition = "2021" diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 7d5062105d0f8..683208d158236 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -41,9 +41,6 @@ pub type BlockNumber = u32; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. pub type Signature = MultiSignature; -/// Index of a transaction in the chain. -pub type Index = u32; - /// A hash of some data used by the chain. pub type Hash = sp_core::H256; @@ -79,6 +76,9 @@ pub type Address = sp_runtime::MultiAddress; /// Balance of an account. pub type Balance = u128; +/// Index of a transaction in the chain. +pub type Nonce = u32; + /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; @@ -171,16 +171,15 @@ parameter_types! 
{ impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type AccountId = AccountId; type RuntimeCall = RuntimeCall; type Lookup = AccountIdLookup; - type Index = Index; - type BlockNumber = BlockNumber; + type Nonce = Nonce; type Hash = Hash; type Hashing = BlakeTwo256; - type Header = generic::Header; type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; @@ -211,6 +210,7 @@ impl pallet_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = ConstU32; + type MaxNominators = ConstU32<0>; type MaxSetIdSessionEntries = ConstU64<0>; type KeyOwnerProof = sp_core::Void; type EquivocationReportSystem = (); @@ -270,10 +270,7 @@ impl pallet_session::Config for Runtime { // Create a runtime using session pallet #[cfg(feature = "use-session-pallet")] construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Runtime { System: frame_system, Timestamp: pallet_timestamp, @@ -289,10 +286,7 @@ construct_runtime!( // Create a runtime NOT using session pallet #[cfg(not(feature = "use-session-pallet"))] construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic + pub enum Runtime { System: frame_system, Timestamp: pallet_timestamp, @@ -387,7 +381,7 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn ring_context() -> Option { + fn ring_context() -> Option { Sassafras::ring_context() } @@ -469,8 +463,8 @@ impl_runtime_apis! 
{ } } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } } From 2aec295f691cbca37e4fe294ee53fb8c79a2825e Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 15:28:42 +0200 Subject: [PATCH 50/62] Improve pallet test wrt body equality assertions --- frame/sassafras/src/mock.rs | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 6d03bb935f964..0572627b39b43 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -195,18 +195,35 @@ pub fn make_prover(pair: &AuthorityPair) -> RingProver { prover } +/// Construct at most `attempts` tickets envelopes for the given `slot`. +/// TODO-SASS-P3: filter out invalid tickets according to test threshold. +/// E.g. 
by passing an optional threshold +pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} + pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { // Values are referring to the next epoch let epoch = Sassafras::epoch_index() + 1; let randomness = Sassafras::next_randomness(); - let body = TicketBody { attempt_idx, erased_public: [0; 32] }; - let input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt_idx, epoch); let output = pair.as_inner_ref().vrf_output(&input); let id = sp_consensus_sassafras::ticket_id(&input, &output); + // Make a dummy ephemeral public that hopefully is unique within a test instance + use sp_core::ByteArray; + let mut erased_public = [0; 32]; + erased_public[..16].copy_from_slice(&pair.public().as_slice()[0..16]); + erased_public[16..].copy_from_slice(&id.to_le_bytes()); + + let body = TicketBody { attempt_idx, erased_public }; + (id, body) } @@ -214,17 +231,6 @@ pub fn make_ticket_bodies(number: u32, pair: &AuthorityPair) -> Vec<(TicketId, T (0..number).into_iter().map(|i| make_ticket_body(i, pair)).collect() } -/// Construct at most `attempts` tickets envelopes for the given `slot`. -/// TODO-SASS-P3: filter out invalid tickets according to test threshold. -/// E.g. by passing an optional threshold -pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { - let prover = make_prover(pair); - (0..attempts) - .into_iter() - .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) - .collect() -} - /// Persist the given tickets in `segments_count` separated segments by appending /// them to the storage segments list. 
/// From 15002c16eae949119872a3bb6c2dbabfb6393fe5 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 16:14:55 +0200 Subject: [PATCH 51/62] Build ring context in pallet genesis build --- frame/sassafras/src/lib.rs | 6 ++++++ test-utils/runtime/src/genesismap.rs | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index be3c14a4ebb9a..1073be65dd180 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -255,6 +255,12 @@ pub mod pallet { fn build(&self) { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); + + // TODO: davxy... remove for tests + log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); + let ring_ctx = RingContext::new_testing(); + log::warn!(target: LOG_TARGET, "... done"); + RingVrfContext::::set(Some(ring_ctx.clone())); } } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index c2a6698ee6265..e1d8353a0e472 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -115,7 +115,6 @@ impl GenesisStorageBuilder { .map(|id| { use std::str::FromStr; let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); - println!(">>>>> {}", seed); sp_keyring::Sr25519Keyring::from_str(&seed).unwrap().into() }) .collect(); From 324531062ad05acb5e22276c6722952c92caa04e Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 16:46:44 +0200 Subject: [PATCH 52/62] Added transaction pool factory to submit tickets --- Cargo.lock | 2 ++ bin/node-sassafras/node/src/service.rs | 1 + client/consensus/sassafras/Cargo.toml | 2 ++ client/consensus/sassafras/src/authorship.rs | 17 +++++++++++++--- client/consensus/sassafras/src/lib.rs | 6 +++--- client/consensus/sassafras/src/tests.rs | 20 ++++++++++--------- .../consensus/sassafras/src/verification.rs | 2 +- 7 files changed, 34 insertions(+), 
16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d79eeb3430b91..1211c0427707d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9234,6 +9234,7 @@ name = "sc-consensus-sassafras" version = "0.3.3-dev" dependencies = [ "async-trait", + "env_logger 0.10.0", "fork-tree", "futures", "log", @@ -9247,6 +9248,7 @@ dependencies = [ "sc-keystore", "sc-network-test", "sc-telemetry", + "sc-transaction-pool-api", "sp-api", "sp-application-crypto", "sp-block-builder", diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 197850d84ab19..26d01e20ff1d9 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -274,6 +274,7 @@ pub fn new_full(config: Configuration) -> Result { ); Ok((slot, timestamp)) }, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), }; let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_params)?; diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 5352129e31fd0..5db790b836da9 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -27,6 +27,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } @@ -38,6 +39,7 @@ sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } 
sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +env_logger = "0.10.0" [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index cafe60af572b3..ac78f7d192e36 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -20,6 +20,7 @@ use super::*; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus_sassafras::{ digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, TicketEnvelope, TicketId, @@ -54,7 +55,7 @@ pub(crate) fn claim_slot( let (authority_idx, ticket_claim) = match maybe_ticket { Some((ticket_id, ticket_data)) => { - log::debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:16x})]"); + log::debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:016x})]"); let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?.clone(); log::debug!( target: LOG_TARGET, @@ -397,6 +398,7 @@ async fn start_tickets_worker( keystore: KeystorePtr, epoch_changes: SharedEpochChanges, select_chain: SC, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, ) where B: BlockT, C: BlockchainEvents + ProvideRuntimeApi, @@ -462,7 +464,12 @@ async fn start_tickets_worker( continue } - let err = match client.runtime_api().submit_tickets_unsigned_extrinsic(best_hash, tickets) { + // Register the offchain tx pool to be able to use it from the runtime. 
+ let mut runtime_api = client.runtime_api(); + runtime_api + .register_extension(offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); + + let err = match runtime_api.submit_tickets_unsigned_extrinsic(best_hash, tickets) { Err(err) => Some(err.to_string()), Ok(false) => Some("Unknown reason".to_string()), _ => None, @@ -542,6 +549,8 @@ pub struct SassafrasWorkerParams { pub force_authoring: bool, /// State shared between import queue and authoring worker. pub sassafras_link: SassafrasLink, + /// The offchain transaction pool factory used for tickets submission. + pub offchain_tx_pool_factory: OffchainTransactionPoolFactory, } /// Start the Sassafras worker. @@ -557,6 +566,7 @@ pub fn start_sassafras( create_inherent_data_providers, force_authoring, sassafras_link, + offchain_tx_pool_factory, }: SassafrasWorkerParams, ) -> Result, ConsensusError> where @@ -583,7 +593,7 @@ where CIDP::InherentDataProviders: InherentDataProviderExt + Send, ER: std::error::Error + Send + From + From + 'static, { - info!(target: LOG_TARGET, "🍁 Starting Sassafras Authorship worker"); + info!(target: LOG_TARGET, "🍁 Starting authorship worker"); let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); @@ -613,6 +623,7 @@ where keystore, sassafras_link.epoch_changes.clone(), select_chain, + offchain_tx_pool_factory, ); let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index a34746af78d1c..2ee1502bd580c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -105,13 +105,13 @@ pub const INTERMEDIATE_KEY: &[u8] = b"sass1"; #[derive(Debug, thiserror::Error)] pub enum Error { /// Multiple Sassafras pre-runtime digests - #[error("Multiple Sassafras pre-runtime digests")] + #[error("Multiple pre-runtime digests")] MultiplePreRuntimeDigests, /// No Sassafras pre-runtime digest found - #[error("No Sassafras pre-runtime 
digest found")] + #[error("No pre-runtime digest found")] NoPreRuntimeDigest, /// Multiple Sassafras epoch change digests - #[error("Multiple Sassafras epoch change digests")] + #[error("Multiple epoch change digests")] MultipleEpochChangeDigests, /// Could not fetch epoch #[error("Could not fetch epoch at {0:?}")] diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 6756fef2f5660..ebfda47018f71 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -31,6 +31,7 @@ use sc_block_builder::BlockBuilderProvider; use sc_client_api::Finalizer; use sc_consensus::{BlockImport, BoxJustificationImport}; use sc_network_test::*; +use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool}; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; @@ -361,10 +362,7 @@ impl TestContext { #[test] fn tests_assumptions_sanity_check() { let env = TestContext::new(); - let config = env.link.genesis_config; - let test_config = create_test_config(); - - assert_eq!(config, test_config); + assert_eq!(env.link.genesis_config, create_test_config()); } #[test] @@ -430,7 +428,7 @@ fn claim_primary_slots_works() { let ticket_secret = TicketSecret { attempt_idx: 0, erased_secret: [0; 32] }; // Fail if we have authority key in our keystore but not ticket aux data - // ticket-aux: KO , authority-key: OK => FAIL + // ticket-aux = None && authority-key = Some => claim = None let claim = authorship::claim_slot( 0.into(), @@ -443,7 +441,7 @@ fn claim_primary_slots_works() { assert!(epoch.tickets_aux.is_empty()); // Success if we have ticket aux data and the authority key in our keystore - // ticket-aux: OK , authority-key: OK => SUCCESS + // ticket-aux = Some && authority-key = Some => claim = Some epoch .tickets_aux @@ -462,7 +460,7 @@ fn claim_primary_slots_works() { 
assert_eq!(auth_id, Keyring::Alice.public().into()); // Fail if we have ticket aux data but not the authority key in out keystore - // ticket-aux: OK , authority-key: KO => FAIL + // ticket-aux = Some && authority-key = None => claim = None epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret)); @@ -737,7 +735,7 @@ fn revert_prunes_epoch_changes_and_removes_weights() { } #[test] -fn revert_not_allowed_for_finalized() { +fn revert_stops_at_last_finalized() { let mut env = TestContext::new(); let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 3); @@ -745,7 +743,7 @@ fn revert_not_allowed_for_finalized() { // Finalize best block env.client.finalize_block(canon[2], None, false).unwrap(); - // Revert canon chain to last finalized block + // Reverts canon chain down to last finalized block crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); let weight_data_check = |hashes: &[Hash], expected: bool| { @@ -849,6 +847,7 @@ impl TestNetFactory for SassafrasTestNet { // Multiple nodes authoring and validating blocks #[tokio::test] async fn sassafras_network_progress() { + env_logger::init(); let net = SassafrasTestNet::new(3); let net = Arc::new(Mutex::new(net)); @@ -919,6 +918,9 @@ async fn sassafras_network_progress() { justification_sync_link: (), force_authoring: false, create_inherent_data_providers, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), }; let sassafras_worker = start_sassafras(sassafras_params).unwrap(); sassafras_workers.push(sassafras_worker); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index c16df446ab4ca..8353b297b58fc 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -403,7 +403,7 @@ where { warn!( target: LOG_TARGET, - "Error checking/reporting Sassafras equivocation: {}", err + "Error 
checking/reporting equivocation: {}", err ); } From 064dff01f20f671889f86ca9c24843c504697974 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 18:11:35 +0200 Subject: [PATCH 53/62] Fix. Native vs wasm usize lengths --- primitives/consensus/sassafras/src/ticket.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 842067d42e92d..2bd24a8b16a28 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -72,7 +72,8 @@ fn vrf_input_from_data( let raw = data.into_iter().fold(Vec::new(), |mut v, e| { let bytes = e.as_ref(); v.extend_from_slice(bytes); - v.extend_from_slice(&bytes.len().to_le_bytes()); + let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed"); + v.extend_from_slice(&len.to_le_bytes()); v }); VrfInput::new(domain, raw) From 998db327015d34a5c50c6f9c2ac77d604b2165fe Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 12 Aug 2023 18:18:17 +0200 Subject: [PATCH 54/62] Bump sassafras version to 0.3.4 --- Cargo.lock | 10 +++++----- bin/node-sassafras/node/Cargo.toml | 8 ++++---- bin/node-sassafras/runtime/Cargo.toml | 6 +++--- client/consensus/sassafras/Cargo.toml | 4 ++-- frame/sassafras/Cargo.toml | 4 ++-- primitives/consensus/sassafras/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 4 ++-- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1211c0427707d..2091b7ac2feb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5332,7 +5332,7 @@ dependencies = [ [[package]] name = "node-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" dependencies = [ "clap 4.3.2", "frame-benchmarking", @@ -5376,7 +5376,7 @@ dependencies = [ [[package]] name = "node-sassafras-runtime" -version = "0.3.3-dev" +version = "0.3.4-dev" dependencies = [ "frame-benchmarking", "frame-executive", @@ -7077,7 +7077,7 @@ 
dependencies = [ [[package]] name = "pallet-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" dependencies = [ "array-bytes 4.2.0", "env_logger 0.10.0", @@ -9231,7 +9231,7 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" dependencies = [ "async-trait", "env_logger 0.10.0", @@ -10882,7 +10882,7 @@ dependencies = [ [[package]] name = "sp-consensus-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" dependencies = [ "async-trait", "merlin 2.0.1", diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml index cec3cd8dbb0a5..f476d589f274e 100644 --- a/bin/node-sassafras/node/Cargo.toml +++ b/bin/node-sassafras/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" authors = ["Parity Technologies ", "Davide Galassi "] description = "Node testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -29,8 +29,8 @@ sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" } -sc-consensus-sassafras = { version = "0.3.3-dev", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.3.3-dev", path = "../../../primitives/consensus/sassafras" } +sc-consensus-sassafras = { version = "0.3.4-dev", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.4-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } @@ 
-59,7 +59,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarkin frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } # Local Dependencies -node-sassafras-runtime = { version = "0.3.3-dev", path = "../runtime" } +node-sassafras-runtime = { version = "0.3.4-dev", path = "../runtime" } [build-dependencies] substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml index d0c1e86ca3204..6ec2b0aa45574 100644 --- a/bin/node-sassafras/runtime/Cargo.toml +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-sassafras-runtime" -version = "0.3.3-dev" +version = "0.3.4-dev" authors = ["Parity Technologies ","Davide Galassi "] description = "Runtime testbed for Sassafras consensus." homepage = "https://substrate.io/" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -pallet-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../../frame/sassafras" } +pallet-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } @@ -27,7 +27,7 @@ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = 
"../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} -sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 5db790b836da9..32e8ee75affc3 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" authors = ["Parity Technologies "] description = "Sassafras consensus algorithm for substrate" edition = "2021" @@ -33,7 +33,7 @@ sp-application-crypto = { version = "23.0.0", path = "../../../primitives/applic sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { version = "0.3.3-dev", path = "../../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.4-dev", path = "../../../primitives/consensus/sassafras" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } diff --git a/frame/sassafras/Cargo.toml 
b/frame/sassafras/Cargo.toml index 08cba5a8dd1ac..06f0c74ad446e 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" @@ -21,7 +21,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys log = { version = "0.4.17", default-features = false } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index bf0ff0a7f904d..b60aee644c6cb 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "0.3.3-dev" +version = "0.3.4-dev" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2021" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 5b82f30e2bef7..76f2f18362121 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -17,7 +17,7 @@ sp-application-crypto = { version = "23.0.0", default-features = false, path = " sp-consensus-aura = { version = "0.10.0-dev", 
default-features = false, path = "../../primitives/consensus/aura", features = ["serde"] } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../primitives/genesis-builder" } -sp-consensus-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } @@ -33,7 +33,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } -pallet-sassafras = { version = "0.3.3-dev", default-features = false, path = "../../frame/sassafras" } +pallet-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../frame/balances" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../frame/executive" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } From 6e23f68b7839aa6a6196a02f0d62d6224b8201d2 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 14 Aug 2023 10:17:52 +0200 Subject: [PATCH 55/62] Sassafras structures overhaul --- 
Cargo.lock | 2 - bin/node-sassafras/node/src/chain_spec.rs | 6 +- bin/node-sassafras/node/src/service.rs | 11 +- client/consensus/sassafras/Cargo.toml | 1 + client/consensus/sassafras/src/authorship.rs | 95 ++++++------- client/consensus/sassafras/src/aux_schema.rs | 2 +- .../consensus/sassafras/src/block_import.rs | 14 +- client/consensus/sassafras/src/lib.rs | 95 +++++++------ client/consensus/sassafras/src/tests.rs | 62 ++++----- .../consensus/sassafras/src/verification.rs | 30 ++-- frame/sassafras/Cargo.toml | 1 + frame/sassafras/src/lib.rs | 65 +++++---- frame/sassafras/src/mock.rs | 36 +++-- frame/sassafras/src/tests.rs | 130 +++++++++--------- primitives/consensus/sassafras/Cargo.toml | 7 +- primitives/consensus/sassafras/README.md | 12 ++ primitives/consensus/sassafras/src/digests.rs | 46 +++---- .../consensus/sassafras/src/inherents.rs | 59 ++++---- primitives/consensus/sassafras/src/lib.rs | 79 +++++------ primitives/consensus/sassafras/src/ticket.rs | 32 ++--- primitives/inherents/src/client_side.rs | 8 +- test-utils/runtime/src/genesismap.rs | 2 +- 22 files changed, 387 insertions(+), 408 deletions(-) create mode 100644 primitives/consensus/sassafras/README.md diff --git a/Cargo.lock b/Cargo.lock index 2091b7ac2feb3..04ca9785ab104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10885,7 +10885,6 @@ name = "sp-consensus-sassafras" version = "0.3.4-dev" dependencies = [ "async-trait", - "merlin 2.0.1", "parity-scale-codec", "scale-info", "serde", @@ -10894,7 +10893,6 @@ dependencies = [ "sp-consensus-slots", "sp-core", "sp-inherents", - "sp-keystore", "sp-runtime", "sp-std", "sp-timestamp", diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs index 0cb9e31e9df4a..bbdcb9de414e8 100644 --- a/bin/node-sassafras/node/src/chain_spec.rs +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -6,7 +6,9 @@ use node_sassafras_runtime::{ use node_sassafras_runtime::{SessionConfig, SessionKeys}; use sc_service::ChainType; use 
sp_consensus_grandpa::AuthorityId as GrandpaId; -use sp_consensus_sassafras::{AuthorityId as SassafrasId, SassafrasEpochConfiguration}; +use sp_consensus_sassafras::{ + AuthorityId as SassafrasId, EpochConfiguration as SassafrasEpochConfig, +}; use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; @@ -131,7 +133,7 @@ fn testnet_genesis( authorities: Vec::new(), #[cfg(not(feature = "use-session-pallet"))] authorities: initial_authorities.iter().map(|x| x.1.clone()).collect(), - epoch_config: SassafrasEpochConfiguration { + epoch_config: SassafrasEpochConfig { attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, }, diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 26d01e20ff1d9..e2e0252dcdb47 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -112,7 +112,7 @@ pub fn new_partial( client.clone(), )?; - let slot_duration = sassafras_link.genesis_config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration; let import_queue = sc_consensus_sassafras::import_queue( sassafras_link.clone(), @@ -252,7 +252,7 @@ pub fn new_full(config: Configuration) -> Result { telemetry.as_ref().map(|x| x.handle()), ); - let slot_duration = sassafras_link.genesis_config().slot_duration(); + let slot_duration = sassafras_link.genesis_config().slot_duration; let sassafras_params = sc_consensus_sassafras::SassafrasWorkerParams { client: client.clone(), @@ -266,12 +266,7 @@ pub fn new_full(config: Configuration) -> Result { force_authoring, create_inherent_data_providers: move |_, _| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); + let slot = 
sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration(*timestamp, slot_duration); Ok((slot, timestamp)) }, offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 32e8ee75affc3..b0a6878e12967 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -9,6 +9,7 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-consensus-sassafras" readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index ac78f7d192e36..f4d4e41b5ea9d 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -25,16 +25,15 @@ use sp_consensus_sassafras::{ digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, TicketEnvelope, TicketId, }; -use sp_core::{bandersnatch::ring_vrf::RingContext, ed25519, twox_64, ByteArray}; +use sp_core::{ + bandersnatch::ring_vrf::RingContext, ed25519::Pair as EphemeralPair, twox_64, ByteArray, +}; use std::pin::Pin; /// Get secondary authority index for the given epoch and slot. -pub(crate) fn secondary_authority_index( - slot: Slot, - config: &SassafrasConfiguration, -) -> AuthorityIndex { - u64::from_le_bytes((config.randomness, slot).using_encoded(twox_64)) as AuthorityIndex % - config.authorities.len() as AuthorityIndex +pub(crate) fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> AuthorityIndex { + u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) as AuthorityIndex % + epoch.authorities.len() as AuthorityIndex } /// Try to claim an epoch slot. 
@@ -45,18 +44,16 @@ pub(crate) fn claim_slot( maybe_ticket: Option<(TicketId, TicketBody)>, keystore: &KeystorePtr, ) -> Option<(PreDigest, AuthorityId)> { - let config = &epoch.config; - - if config.authorities.is_empty() { + if epoch.authorities.is_empty() { return None } - let mut vrf_sign_data = slot_claim_sign_data(&config.randomness, slot, epoch.epoch_idx); + let mut vrf_sign_data = slot_claim_sign_data(&epoch.randomness, slot, epoch.epoch_idx); let (authority_idx, ticket_claim) = match maybe_ticket { Some((ticket_id, ticket_data)) => { log::debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:016x})]"); - let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?.clone(); + let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?; log::debug!( target: LOG_TARGET, " got ticket: auth: {}, attempt: {}", @@ -67,19 +64,19 @@ pub(crate) fn claim_slot( vrf_sign_data.push_transcript_data(&ticket_data.encode()); let data = vrf_sign_data.challenge::<32>(); - let erased_pair = ed25519::Pair::from_seed(&ticket_secret.erased_secret); - let erased_signature = *erased_pair.sign(&data).as_ref(); + let erased_pair = EphemeralPair::from_seed(&ticket_secret.seed); + let erased_signature = erased_pair.sign(&data); let claim = TicketClaim { erased_signature }; (authority_idx, Some(claim)) }, None => { log::debug!(target: LOG_TARGET, "[TRY SECONDARY (slot {slot})]"); - (secondary_authority_index(slot, config), None) + (secondary_authority_index(slot, epoch), None) }, }; - let authority_id = config.authorities.get(authority_idx as usize)?; + let authority_id = epoch.authorities.get(authority_idx as usize)?; let vrf_signature = keystore .bandersnatch_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_sign_data) @@ -100,25 +97,24 @@ fn generate_epoch_tickets( keystore: &KeystorePtr, ring_ctx: &RingContext, ) -> Vec { - let config = &epoch.config; - let max_attempts = config.threshold_params.attempts_number; - let 
redundancy_factor = config.threshold_params.redundancy_factor; let mut tickets = Vec::new(); let threshold = ticket_id_threshold( - redundancy_factor, - config.epoch_duration as u32, - max_attempts, - config.authorities.len() as u32, + epoch.config.redundancy_factor, + epoch.epoch_duration as u32, + epoch.config.attempts_number, + epoch.authorities.len() as u32, ); // TODO-SASS-P4 remove me log::debug!(target: LOG_TARGET, "Generating tickets for epoch {} @ slot {}", epoch.epoch_idx, epoch.start_slot); log::debug!(target: LOG_TARGET, " threshold: {threshold:016x}"); // We need a list of raw unwrapped keys - let pks: Vec<_> = config.authorities.iter().map(|a| *a.as_ref()).collect(); + let pks: Vec<_> = epoch.authorities.iter().map(|a| *a.as_ref()).collect(); + + let mut tickets_aux = Vec::new(); - for (authority_idx, authority_id) in config.authorities.iter().enumerate() { + for (authority_idx, authority_id) in epoch.authorities.iter().enumerate() { if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { continue } @@ -128,7 +124,7 @@ fn generate_epoch_tickets( debug!(target: LOG_TARGET, ">>> ...done"); let make_ticket = |attempt_idx| { - let vrf_input = ticket_id_vrf_input(&config.randomness, attempt_idx, epoch.epoch_idx); + let vrf_input = ticket_id_vrf_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); let vrf_preout = keystore .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &vrf_input) @@ -139,16 +135,18 @@ fn generate_epoch_tickets( return None } - let (erased_pair, erased_seed) = ed25519::Pair::generate(); + // @davxy TODO: why not generate from seed. 
+ // Seed computed as f(pair.seed || ticket_id) + let (erased_pair, erased_seed) = EphemeralPair::generate(); - let erased_public: [u8; 32] = *erased_pair.public().as_ref(); - let ticket_body = TicketBody { attempt_idx, erased_public }; + let erased_public = erased_pair.public(); + let body = TicketBody { attempt_idx, erased_public }; debug!(target: LOG_TARGET, ">>> Creating ring proof for attempt {}", attempt_idx); - let mut sign_data = ticket_body_sign_data(&ticket_body); + let mut sign_data = ticket_body_sign_data(&body); sign_data.push_vrf_input(vrf_input).expect("Can't fail"); - let ring_signature = keystore + let signature = keystore .bandersnatch_ring_vrf_sign( AuthorityId::ID, authority_id.as_ref(), @@ -158,23 +156,25 @@ fn generate_epoch_tickets( .ok()??; debug!(target: LOG_TARGET, ">>> ...done"); - let ticket_envelope = TicketEnvelope { body: ticket_body, ring_signature }; + let ticket_envelope = TicketEnvelope { body, signature }; - let ticket_secret = TicketSecret { attempt_idx, erased_secret: erased_seed }; + let ticket_secret = TicketSecret { attempt_idx, seed: erased_seed }; Some((ticket_id, ticket_envelope, ticket_secret)) }; - for attempt in 0..max_attempts { + for attempt in 0..epoch.config.attempts_number { if let Some((ticket_id, ticket_envelope, ticket_secret)) = make_ticket(attempt) { log::debug!(target: LOG_TARGET, " → {ticket_id:016x}"); - epoch - .tickets_aux - .insert(ticket_id, (authority_idx as AuthorityIndex, ticket_secret)); tickets.push(ticket_envelope); + tickets_aux.push((ticket_id, authority_idx as u32, ticket_secret)); } } } + + tickets_aux.into_iter().for_each(|(ticket_id, authority_idx, ticket_secret)| { + epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret)); + }); tickets } @@ -188,7 +188,7 @@ struct SlotWorker { keystore: KeystorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, - genesis_config: SassafrasConfiguration, + genesis_config: Epoch, } #[async_trait::async_trait] 
@@ -239,7 +239,7 @@ where self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) - .map(|epoch| epoch.as_ref().config.authorities.len()) + .map(|epoch| epoch.as_ref().authorities.len()) } async fn claim_slot( @@ -475,19 +475,8 @@ async fn start_tickets_worker( _ => None, }; - match err { - None => { - // Cache tickets in the epoch changes tree - epoch_changes - .shared_data() - .epoch_mut(&epoch_identifier) - .map(|target_epoch| target_epoch.tickets_aux = epoch.tickets_aux); - // TODO-SASS-P4: currently we don't persist the tickets proofs - // Thus on reboot/crash we are loosing them. - }, - Some(err) => { - error!(target: LOG_TARGET, "Unable to submit tickets: {}", err); - }, + if let Some(err) = err { + error!(target: LOG_TARGET, "Unable to submit tickets: {}", err); } } } @@ -611,7 +600,7 @@ where }; let slot_worker = sc_consensus_slots::start_slot_worker( - sassafras_link.genesis_config.slot_duration(), + sassafras_link.genesis_config.slot_duration, select_chain.clone(), sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), sync_oracle, diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 8c891ea0630f3..6b56011632671 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -54,7 +54,7 @@ where } } -/// Update the epoch changes on disk after a change. +/// Update the epoch changes to persist after a change. 
pub fn write_epoch_changes( epoch_changes: &EpochChangesFor, write_aux: F, diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 6bc3b93595801..c11daf58ba3f5 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -33,7 +33,7 @@ pub struct SassafrasBlockImport { inner: I, client: Arc, epoch_changes: SharedEpochChanges, - genesis_config: SassafrasConfiguration, + genesis_config: Epoch, } impl Clone for SassafrasBlockImport { @@ -72,7 +72,7 @@ where inner: I, client: Arc, epoch_changes: SharedEpochChanges, - genesis_config: SassafrasConfiguration, + genesis_config: Epoch, ) -> Self { let client_weak = Arc::downgrade(&client); let on_finality = move |notification: &FinalityNotification| { @@ -155,8 +155,7 @@ where // current epoch. We will figure out which is the first skipped epoch and we // will partially re-use its data for this "recovery" epoch. let epoch_data = viable_epoch.as_mut(); - let skipped_epochs = - (*slot - *epoch_data.start_slot) / epoch_data.config.epoch_duration; + let skipped_epochs = (*slot - *epoch_data.start_slot) / epoch_data.epoch_duration; let original_epoch_idx = epoch_data.epoch_idx; // NOTE: notice that we are only updating a local copy of the `Epoch`, this @@ -169,9 +168,8 @@ where // predicate `epoch.start_slot <= slot` which will still match correctly without // requiring to update `start_slot` to the correct value. epoch_data.epoch_idx += skipped_epochs; - epoch_data.start_slot = Slot::from( - *epoch_data.start_slot + skipped_epochs * epoch_data.config.epoch_duration, - ); + epoch_data.start_slot = + Slot::from(*epoch_data.start_slot + skipped_epochs * epoch_data.epoch_duration); log::warn!( target: LOG_TARGET, "Epoch(s) skipped from {} to {}", @@ -506,7 +504,7 @@ where /// Also returns a link object used to correctly instantiate the import queue /// and authoring worker. 
pub fn block_import( - genesis_config: SassafrasConfiguration, + genesis_config: Epoch, inner_block_import: I, client: Arc, ) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2ee1502bd580c..e3dfbce023ef0 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -73,14 +73,14 @@ use sp_runtime::{ DigestItem, }; -// Re-export Sassafras primitives. +// Re-export some primitives. pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, inherents::SassafrasInherentData, slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id_vrf_input, - AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, SassafrasApi, - SassafrasConfiguration, SassafrasEpochConfiguration, TicketBody, TicketClaim, TicketEnvelope, - TicketId, TicketSecret, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, + SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, + SASSAFRAS_ENGINE_ID, }; mod authorship; @@ -187,27 +187,41 @@ fn sassafras_err(err: Error) -> Error { err } -/// Sassafras epoch information augmented with private tickets information. -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug)] +/// Secret seed +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub struct TicketSecret { + /// Attempt index + pub(crate) attempt_idx: u32, + /// Secret seed + pub(crate) seed: [u8; 32], +} + +/// Primitive epoch newtype. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] pub struct Epoch { - /// The epoch index. - pub epoch_idx: u64, - /// The starting slot of the epoch. - pub start_slot: Slot, - /// Epoch configuration. - pub config: SassafrasConfiguration, - /// Tickets associated secret data. 
- pub tickets_aux: BTreeMap, + pub(crate) inner: sp_consensus_sassafras::Epoch, + pub(crate) tickets_aux: BTreeMap, +} + +use std::ops::{Deref, DerefMut}; + +impl Deref for Epoch { + type Target = sp_consensus_sassafras::Epoch; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for Epoch { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } } impl From for Epoch { fn from(epoch: sp_consensus_sassafras::Epoch) -> Self { - Epoch { - epoch_idx: epoch.epoch_idx, - start_slot: epoch.start_slot, - config: epoch.config, - tickets_aux: BTreeMap::new(), - } + Epoch { inner: epoch, tickets_aux: Default::default() } } } @@ -216,19 +230,16 @@ impl EpochT for Epoch { type Slot = Slot; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - let config = SassafrasConfiguration { - slot_duration: self.config.slot_duration, - epoch_duration: self.config.epoch_duration, + sp_consensus_sassafras::Epoch { + epoch_idx: self.epoch_idx + 1, + start_slot: self.start_slot + self.epoch_duration, + slot_duration: self.slot_duration, + epoch_duration: self.epoch_duration, authorities: descriptor.authorities, randomness: descriptor.randomness, - threshold_params: descriptor.config.unwrap_or(self.config.threshold_params.clone()), - }; - Epoch { - epoch_idx: self.epoch_idx + 1, - start_slot: self.start_slot + config.epoch_duration, - config, - tickets_aux: BTreeMap::new(), + config: descriptor.config.unwrap_or(self.config), } + .into() } fn start_slot(&self) -> Slot { @@ -236,26 +247,24 @@ impl EpochT for Epoch { } fn end_slot(&self) -> Slot { - self.start_slot + self.config.epoch_duration + self.start_slot + self.epoch_duration } } impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. 
- pub fn genesis(config: &SassafrasConfiguration, slot: Slot) -> Epoch { - Epoch { - epoch_idx: 0, - start_slot: slot, - config: config.clone(), - tickets_aux: BTreeMap::new(), - } + pub fn genesis(config: &Epoch, slot: Slot) -> Epoch { + let mut epoch = config.clone(); + epoch.epoch_idx = 0; + epoch.start_slot = slot; + epoch } } /// Read protocol configuration from the blockchain state corresponding /// to the last finalized block -pub fn finalized_configuration(client: &C) -> ClientResult +pub fn finalized_configuration(client: &C) -> ClientResult where B: BlockT, C: ProvideRuntimeApi + UsageProvider, @@ -268,7 +277,7 @@ where }); let epoch = client.runtime_api().current_epoch(hash)?; - Ok(epoch.config) + Ok(epoch.into()) } /// Intermediate value passed to block importer from authoring or validation logic. @@ -328,12 +337,12 @@ pub struct SassafrasLink { /// Epoch changes tree epoch_changes: SharedEpochChanges, /// Startup configuration. Read from runtime at last finalized block. - genesis_config: SassafrasConfiguration, + genesis_config: Epoch, } impl SassafrasLink { /// Get the config of this link. - pub fn genesis_config(&self) -> &SassafrasConfiguration { + pub fn genesis_config(&self) -> &Epoch { &self.genesis_config } } @@ -382,8 +391,8 @@ where select_chain, create_inherent_data_providers, sassafras_link.epoch_changes, - telemetry, sassafras_link.genesis_config, + telemetry, ); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index ebfda47018f71..23526c007fcb9 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -18,7 +18,7 @@ //! Sassafras client tests -// TODO-SASS-P3 +// TODO @davxy // Missing tests // - verify block claimed via primary method // - tests using tickets to claim slots. 
Curret tests just doesn't register any on-chain ticket @@ -35,7 +35,8 @@ use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool}; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; -use sp_consensus_sassafras::inherents::InherentDataProvider; +use sp_consensus_sassafras::{inherents::InherentDataProvider, EphemeralPublic, SlotDuration}; +use sp_core::crypto::UncheckedFrom; use sp_keyring::BandersnatchKeyring as Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{Digest, DigestItem}; @@ -141,10 +142,9 @@ struct TestContext { fn create_test_verifier( client: Arc, link: &SassafrasLink, - config: SassafrasConfiguration, + config: Epoch, ) -> SassafrasVerifier { - let slot_duration = config.slot_duration(); - + let slot_duration = config.slot_duration; let create_inherent_data_providers = Box::new(move |_, _| async move { let slot = InherentDataProvider::from_timestamp_and_slot_duration( Timestamp::current(), @@ -160,14 +160,14 @@ fn create_test_verifier( longest_chain, create_inherent_data_providers, link.epoch_changes.clone(), - None, config, + None, ) } fn create_test_block_import( client: Arc, - config: SassafrasConfiguration, + config: Epoch, ) -> (SassafrasBlockImport, SassafrasLink) { crate::block_import(config, client.clone(), client.clone()) .expect("can initialize block-import") @@ -181,9 +181,11 @@ fn create_test_keystore(authority: Keyring) -> KeystorePtr { keystore.into() } -fn create_test_config() -> SassafrasConfiguration { - SassafrasConfiguration { - slot_duration: SLOT_DURATION, +fn create_test_epoch() -> Epoch { + sp_consensus_sassafras::Epoch { + epoch_idx: 0, + start_slot: 0.into(), + slot_duration: SlotDuration::from_millis(SLOT_DURATION), epoch_duration: EPOCH_DURATION, authorities: vec![ Keyring::Alice.public().into(), @@ -191,8 +193,9 @@ fn create_test_config() -> 
SassafrasConfiguration { Keyring::Charlie.public().into(), ], randomness: [0; 32], - threshold_params: SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, + config: EpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, } + .into() } impl TestContext { @@ -362,29 +365,24 @@ impl TestContext { #[test] fn tests_assumptions_sanity_check() { let env = TestContext::new(); - assert_eq!(env.link.genesis_config, create_test_config()); + assert_eq!(env.link.genesis_config, create_test_epoch()); } #[test] fn claim_secondary_slots_works() { - let mut config = create_test_config(); - config.randomness = [2; 32]; + let mut epoch = create_test_epoch(); + epoch.epoch_idx = 1; + epoch.start_slot = 6.into(); + epoch.randomness = [2; 32]; let authorities = [Keyring::Alice, Keyring::Bob, Keyring::Charlie]; - let mut epoch = Epoch { - epoch_idx: 1, - start_slot: 6.into(), - config: config.clone(), - tickets_aux: Default::default(), - }; - - let mut assignments = vec![usize::MAX; config.epoch_duration as usize]; + let mut assignments = vec![usize::MAX; epoch.epoch_duration as usize]; for (auth_idx, auth_id) in authorities.iter().enumerate() { let keystore = create_test_keystore(*auth_id); - for slot in 0..config.epoch_duration { + for slot in 0..epoch.epoch_duration { if let Some((claim, auth_id2)) = authorship::claim_slot(slot.into(), &mut epoch, None, &keystore) { @@ -410,22 +408,18 @@ fn claim_primary_slots_works() { // If a node has in its epoch `tickets_aux` the information corresponding to the // ticket that is presented. Then the claim ticket should just return the // ticket auxiliary information. 
- let mut config = create_test_config(); - config.randomness = [2; 32]; - - let mut epoch = Epoch { - epoch_idx: 1, - start_slot: 6.into(), - config: config.clone(), - tickets_aux: Default::default(), - }; + let mut epoch = create_test_epoch(); + epoch.randomness = [2; 32]; + epoch.epoch_idx = 1; + epoch.start_slot = 6.into(); let keystore = create_test_keystore(Keyring::Alice); let alice_authority_idx = 0_u32; let ticket_id = 123; - let ticket_body = TicketBody { attempt_idx: 0, erased_public: [0; 32] }; - let ticket_secret = TicketSecret { attempt_idx: 0, erased_secret: [0; 32] }; + let erased_public = EphemeralPublic::unchecked_from([0; 32]); + let ticket_body = TicketBody { attempt_idx: 0, erased_public }; + let ticket_secret = TicketSecret { attempt_idx: 0, seed: [0; 32] }; // Fail if we have authority key in our keystore but not ticket aux data // ticket-aux = None && authority-key = Some => claim = None diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 8353b297b58fc..29abffe591a15 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -19,8 +19,10 @@ //! Types and functions related to block verification. use super::*; -use sp_application_crypto::Wraps; -use sp_core::{crypto::VrfPublic, ed25519}; +use sp_core::{ + crypto::{VrfPublic, Wraps}, + ed25519::Pair as EphemeralPair, +}; // Allowed slot drift. 
const MAX_SLOT_DRIFT: u64 = 1; @@ -63,7 +65,6 @@ fn check_header( ) -> Result, Error> { let VerificationParams { mut header, pre_digest, slot_now, epoch, origin, maybe_ticket } = params; - let config = &epoch.config; let seal = header .digest_mut() @@ -76,7 +77,7 @@ fn check_header( return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) } - let Some(authority_id) = config.authorities.get(pre_digest.authority_idx as usize) else { + let Some(authority_id) = epoch.authorities.get(pre_digest.authority_idx as usize) else { return Err(sassafras_err(Error::SlotAuthorNotFound)) }; @@ -94,7 +95,7 @@ fn check_header( // Optionally check ticket ownership let mut vrf_sign_data = - slot_claim_sign_data(&config.randomness, pre_digest.slot, epoch.epoch_idx); + slot_claim_sign_data(&epoch.randomness, pre_digest.slot, epoch.epoch_idx); match (&maybe_ticket, &pre_digest.ticket_claim) { (Some((_ticket_id, ticket_data)), Some(ticket_claim)) => { @@ -103,16 +104,17 @@ fn check_header( vrf_sign_data.push_transcript_data(&ticket_data.encode()); let challenge = vrf_sign_data.challenge::<32>(); - let erased_public = ed25519::Public::from_raw(ticket_data.erased_public); - let erased_signature = ed25519::Signature::from_raw(ticket_claim.erased_signature); - - if !ed25519::Pair::verify(&erased_signature, &challenge, &erased_public) { + if !EphemeralPair::verify( + &ticket_claim.erased_signature, + &challenge, + &ticket_data.erased_public, + ) { return Err(sassafras_err(Error::BadSignature(pre_hash))) } }, (None, None) => { log::debug!(target: LOG_TARGET, "checking secondary"); - let idx = authorship::secondary_authority_index(pre_digest.slot, config); + let idx = authorship::secondary_authority_index(pre_digest.slot, epoch); if idx != pre_digest.authority_idx { log::error!(target: LOG_TARGET, "Bad secondary authority index"); return Err(Error::SlotAuthorNotFound) @@ -149,8 +151,8 @@ pub struct SassafrasVerifier { select_chain: SelectChain, create_inherent_data_providers: CIDP, 
epoch_changes: SharedEpochChanges, + genesis_config: Epoch, telemetry: Option, - genesis_config: SassafrasConfiguration, } impl SassafrasVerifier { @@ -160,16 +162,16 @@ impl SassafrasVerifier, + genesis_config: Epoch, telemetry: Option, - genesis_config: SassafrasConfiguration, ) -> Self { SassafrasVerifier { client, select_chain, create_inherent_data_providers, epoch_changes, - telemetry, genesis_config, + telemetry, } } } @@ -419,7 +421,7 @@ where .create_inherent_data() .await .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(pre_digest.slot); + inherent_data.sassafras_replace_inherent_data(&pre_digest.slot); self.check_inherents( new_block.clone(), parent_hash, diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 06f0c74ad446e..d30c38d7d2979 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -8,6 +8,7 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Consensus extension module for Sassafras consensus." readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 1073be65dd180..8e3c753546f6c 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -57,9 +57,8 @@ use frame_system::{ }; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, Epoch, EquivocationProof, Randomness, RingContext, SassafrasConfiguration, - SassafrasEpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, - SASSAFRAS_ENGINE_ID, + AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, RingContext, Slot, + SlotDuration, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -193,12 +192,12 @@ pub mod pallet { /// The configuration for the current epoch. 
#[pallet::storage] #[pallet::getter(fn config)] - pub type EpochConfig = StorageValue<_, SassafrasEpochConfiguration, ValueQuery>; + pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; /// The configuration for the next epoch. #[pallet::storage] #[pallet::getter(fn next_config)] - pub type NextEpochConfig = StorageValue<_, SassafrasEpochConfiguration>; + pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next /// epoch is enacted. @@ -206,7 +205,7 @@ pub mod pallet { /// This is to maintain coherence for already submitted tickets for epoch N+1 that where /// computed using configuration parameters stored for epoch N+1. #[pallet::storage] - pub(super) type PendingEpochConfigChange = StorageValue<_, SassafrasEpochConfiguration>; + pub(super) type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; /// Stored tickets metadata. #[pallet::storage] @@ -244,7 +243,7 @@ pub mod pallet { /// Genesis authorities. pub authorities: Vec, /// Genesis epoch configuration. - pub epoch_config: SassafrasEpochConfiguration, + pub epoch_config: EpochConfiguration, /// Phantom config #[serde(skip)] pub _phantom: sp_std::marker::PhantomData, @@ -256,11 +255,11 @@ pub mod pallet { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); - // TODO: davxy... remove for tests - log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); - let ring_ctx = RingContext::new_testing(); - log::warn!(target: LOG_TARGET, "... done"); - RingVrfContext::::set(Some(ring_ctx.clone())); + // // TODO: davxy... remove for tests + // log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); + // let ring_ctx = RingContext::new_testing(); + // log::warn!(target: LOG_TARGET, "... 
done"); + // RingVrfContext::::set(Some(ring_ctx.clone())); } } @@ -405,7 +404,7 @@ pub mod pallet { epoch_idx, ); - let Some(vrf_preout) = ticket.ring_signature.outputs.get(0) else { + let Some(vrf_preout) = ticket.signature.outputs.get(0) else { log::debug!(target: LOG_TARGET, "Missing ticket pre-output from ring signature"); continue }; @@ -418,7 +417,7 @@ pub mod pallet { let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&ticket.body); sign_data.push_vrf_input(vrf_input).expect("Can't fail"); - if ticket.ring_signature.verify(&sign_data, &verifier) { + if ticket.signature.verify(&sign_data, &verifier) { TicketsData::::set(ticket_id, Some(ticket.body)); segment .try_push(ticket_id) @@ -453,7 +452,7 @@ pub mod pallet { #[pallet::weight({0})] pub fn plan_config_change( origin: OriginFor, - config: SassafrasEpochConfiguration, + config: EpochConfiguration, ) -> DispatchResult { ensure_root(origin)?; @@ -808,34 +807,34 @@ impl Pallet { Self::deposit_consensus(ConsensusLog::NextEpochData(next)); } - /// Current epoch configuration. + /// Current epoch information. pub fn current_epoch() -> Epoch { - let config = SassafrasConfiguration { - slot_duration: T::SlotDuration::get(), + let epoch_idx = EpochIndex::::get(); + Epoch { + epoch_idx, + start_slot: Self::epoch_start(epoch_idx), + slot_duration: SlotDuration::from_millis(T::SlotDuration::get()), epoch_duration: T::EpochDuration::get(), authorities: Self::authorities().to_vec(), randomness: Self::randomness(), - threshold_params: Self::config(), - }; - let epoch_idx = EpochIndex::::get(); - let start_slot = Self::current_epoch_start(); - Epoch { epoch_idx, start_slot, config } + config: Self::config(), + } } - /// Next epoch configuration. + /// Next epoch information. 
pub fn next_epoch() -> Epoch { - let config = SassafrasConfiguration { - slot_duration: T::SlotDuration::get(), - epoch_duration: T::EpochDuration::get(), - authorities: Self::next_authorities().to_vec(), - randomness: Self::next_randomness(), - threshold_params: Self::next_config().unwrap_or_else(|| Self::config()), - }; let epoch_idx = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - let start_slot = Self::epoch_start(epoch_idx); - Epoch { epoch_idx, start_slot, config } + Epoch { + epoch_idx, + start_slot: Self::epoch_start(epoch_idx), + slot_duration: SlotDuration::from_millis(T::SlotDuration::get()), + epoch_duration: T::EpochDuration::get(), + authorities: Self::next_authorities().to_vec(), + randomness: Self::next_randomness(), + config: Self::next_config().unwrap_or_else(|| Self::config()), + } } /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index 0572627b39b43..ee234749bf9dc 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -22,11 +22,12 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever, *}; use frame_support::traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityIndex, AuthorityPair, RingProver, SassafrasEpochConfiguration, - Slot, TicketBody, TicketEnvelope, TicketId, VrfSignature, + digests::PreDigest, AuthorityIndex, AuthorityPair, EpochConfiguration, RingProver, Slot, + TicketBody, TicketEnvelope, TicketId, VrfSignature, }; use sp_core::{ - crypto::{Pair, VrfSecret, Wraps}, + crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, + ed25519::Public as EphemeralPublic, H256, U256, }; use sp_runtime::{ @@ -100,8 +101,8 @@ frame_support::construct_runtime!( // // The redundancy factor has been set to max value to accept all 
submitted // tickets without worrying about the threshold. -pub const TEST_EPOCH_CONFIGURATION: SassafrasEpochConfiguration = - SassafrasEpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 32 }; +pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = + EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 32 }; /// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { @@ -157,18 +158,25 @@ fn make_ticket_with_prover( let epoch = Sassafras::epoch_index() + 1; let randomness = Sassafras::next_randomness(); - let body = TicketBody { attempt_idx: attempt, erased_public: [0; 32] }; + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw.copy_from_slice(&pair.public().as_slice()[0..32]); + let erased_public = EphemeralPublic::unchecked_from(raw); + + let body = TicketBody { attempt_idx: attempt, erased_public }; let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&body); let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt, epoch); sign_data.push_vrf_input(vrf_input).unwrap(); - let ring_signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); // Ticket-id can be generated via vrf-preout. // We don't care that much about its value here. 
- TicketEnvelope { body, ring_signature } + TicketEnvelope { body, signature } } pub fn make_prover(pair: &AuthorityPair) -> RingProver { @@ -216,11 +224,13 @@ pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, Ti let id = sp_consensus_sassafras::ticket_id(&input, &output); - // Make a dummy ephemeral public that hopefully is unique within a test instance - use sp_core::ByteArray; - let mut erased_public = [0; 32]; - erased_public[..16].copy_from_slice(&pair.public().as_slice()[0..16]); - erased_public[16..].copy_from_slice(&id.to_le_bytes()); + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); + raw[16..].copy_from_slice(&id.to_le_bytes()); + let erased_public = EphemeralPublic::unchecked_from(raw); let body = TicketBody { attempt_idx, erased_public }; diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs index 674e30d121c7f..4b2b22d64d3ec 100644 --- a/frame/sassafras/src/tests.rs +++ b/frame/sassafras/src/tests.rs @@ -150,7 +150,7 @@ fn on_first_block_after_genesis() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("7ca54f761c6ec87503367cb3418740b8bab9796f861b9b1cb4945344bd5e87ca"), + h2b("416f7e78a0390e14677782ea22102ba749eb9de7d02df46b39d1e3d6e6759c62"), ); // Header data check @@ -201,7 +201,7 @@ fn on_normal_block() { println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("7ca54f761c6ec87503367cb3418740b8bab9796f861b9b1cb4945344bd5e87ca"), + h2b("416f7e78a0390e14677782ea22102ba749eb9de7d02df46b39d1e3d6e6759c62"), ); let header = finalize_block(end_block); @@ -219,7 +219,7 @@ fn on_normal_block() { println!("{}", 
b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("ec9ccd9bf272de069b0e51089e7182008ed7edef3ed878bb703e9e8945ead5ed"), + h2b("eab1c5692bf3255ae46b2e732d061700fcd51ab57f029ad39983ceae5214a713"), ); // Header data check @@ -257,12 +257,12 @@ fn produce_epoch_change_digest_no_config() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("85b976e3d66ecba38053d508dbccf1a17b36958fd2c2888669e439671f9b4e09"), + h2b("cb52dcf3b0caca956453d42004ac1b8005a26be669c2aaf534548e0b4c872a52"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("f98d9bcc7f368068c93a68f8c1eb016a15612916bda89443eda9921b8402af4c"), + h2b("ce3e3aeae02c85a8e0c8ee0ff0b120484df4551491ac2296e40147634ca4c58c"), ); let header = finalize_block(end_block); @@ -279,12 +279,12 @@ fn produce_epoch_change_digest_no_config() { println!("{}", b2h(NextRandomness::::get())); assert_eq!( NextRandomness::::get(), - h2b("85b976e3d66ecba38053d508dbccf1a17b36958fd2c2888669e439671f9b4e09"), + h2b("cb52dcf3b0caca956453d42004ac1b8005a26be669c2aaf534548e0b4c872a52"), ); println!("{}", b2h(RandomnessAccumulator::::get())); assert_eq!( RandomnessAccumulator::::get(), - h2b("7e3439ef345329ca6cc0e0b1f31cfb28b462540db2258e5c7c61e4d1f366013b"), + h2b("1288d911ca5deb9c514149d4fdb64ebf94e63989e09e03bc69218319456d4ec9"), ); // Header data check @@ -314,7 +314,7 @@ fn produce_epoch_change_digest_with_config() { initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - let config = SassafrasEpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; + let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; Sassafras::plan_config_change(RuntimeOrigin::root(), config.clone()).unwrap(); // We want to trigger an epoch change in this test. 
@@ -343,64 +343,6 @@ fn produce_epoch_change_digest_with_config() { }) } -// TODO davxy: create a read_tickets method which reads pre-constructed good tickets -// from a file. Creating this stuff "on-the-fly" is just too much expensive -// -// A valid ring-context is required for this test since we are passing though the -// `submit_ticket` call which tests for ticket validity. -#[test] -fn submit_tickets_with_ring_proof_check_works() { - let (pairs, mut ext) = new_test_ext_with_pairs(10, true); - let pair = &pairs[0]; - let segments_count = 3; - - ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - let max_tickets: u32 = ::MaxTickets::get(); - let attempts_number = segments_count * max_tickets; - - // Tweak the epoch config to discard some of the tickets - let mut config = EpochConfig::::get(); - config.redundancy_factor = 7; - config.attempts_number = attempts_number; - EpochConfig::::set(config); - - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - - // Check state before tickets submission - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, - ); - - // Populate the segments via the `submit_tickets` - let tickets = make_tickets(attempts_number, pair); - let segment_len = tickets.len() / segments_count as usize; - for i in 0..segments_count as usize { - println!("Submit tickets"); - let segment = - tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); - Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); - } - - // Check state after submission - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { segments_count, tickets_count: [0, 0] }, - ); - - finalize_block(start_block); - - // Check against the expected results given the known inputs - assert_eq!(NextTicketsSegments::::get(0).len(), 2); - let seg = NextTicketsSegments::::get(1); - assert_eq!(seg.len(), 3); - let seg = NextTicketsSegments::::get(2); - 
assert_eq!(seg.len(), 2); - }) -} - #[test] fn segments_incremental_sort_works() { let (pairs, mut ext) = new_test_ext_with_pairs(1, false); @@ -663,3 +605,59 @@ fn obsolete_tickets_are_removed_on_epoch_change() { }); }) } + +// TODO davxy: create a read_tickets method which reads pre-constructed good tickets +// from a file. Creating this stuff "on-the-fly" is just too much expensive +// +// A valid ring-context is required for this test since we are passing though the +// `submit_ticket` call which tests for ticket validity. +#[test] +fn submit_tickets_with_ring_proof_check_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(10, true); + let pair = &pairs[0]; + let segments_count = 3; + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + let attempts_number = segments_count * max_tickets; + + // Tweak the epoch config to discard some of the tickets + let mut config = EpochConfig::::get(); + config.redundancy_factor = 7; + config.attempts_number = attempts_number; + EpochConfig::::set(config); + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Check state before tickets submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, + ); + + // Populate the segments via the `submit_tickets` + let tickets = make_tickets(attempts_number, pair); + let segment_len = tickets.len() / segments_count as usize; + for i in 0..segments_count as usize { + println!("Submit tickets"); + let segment = + tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); + } + + // Check state after submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count, tickets_count: [0, 0] }, + ); + + finalize_block(start_block); + + // Check against the expected results given the known inputs + 
assert_eq!(NextTicketsSegments::::get(0).len(), 6); + assert_eq!(NextTicketsSegments::::get(1).len(), 1); + assert_eq!(NextTicketsSegments::::get(2).len(), 2); + }) +} diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index b60aee644c6cb..8d687aabee00b 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -9,22 +9,21 @@ homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-consensus-sassafras" readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.50", optional = true } -merlin = { version = "2.0", default-features = false } scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.163", default-features = false, features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } sp-core = { version = "21.0.0", default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.27.0", default-features = false, optional = true, path = "../../keystore", features = ["bandersnatch-experimental"] } sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } sp-std = { version = "8.0.0", 
default-features = false, path = "../../std" } sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp" } @@ -33,7 +32,6 @@ sp-timestamp = { version = "4.0.0-dev", optional = true, path = "../../timestamp default = ["std"] std = [ "async-trait", - "merlin/std", "scale-codec/std", "scale-info/std", "serde/std", @@ -42,7 +40,6 @@ std = [ "sp-consensus-slots/std", "sp-core/std", "sp-inherents/std", - "sp-keystore/std", "sp-runtime/std", "sp-std/std", "sp-timestamp", diff --git a/primitives/consensus/sassafras/README.md b/primitives/consensus/sassafras/README.md new file mode 100644 index 0000000000000..f632ce5ba534d --- /dev/null +++ b/primitives/consensus/sassafras/README.md @@ -0,0 +1,12 @@ +Primitives for SASSAFRAS. + +# ⚠️ WARNING ⚠️ + +The crate interfaces and structures are highly experimental and may be subject +to significant changes. + +Depends on upstream experimental feature: `bandersnatch-experimental`. + +These structs were mostly extracted from the main SASSAFRAS protocol PR: https://github.com/paritytech/substrate/pull/11879. + +Tracking issue: https://github.com/paritytech/substrate/issues/11515. diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 966220c0f83df..1971540351d3c 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Private implementation details of Sassafras digests. +//! Sassafras digests structures and helpers. 
-use super::{ - ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, - SassafrasEpochConfiguration, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, +use crate::{ + ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, EpochConfiguration, + Randomness, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -28,7 +28,7 @@ use scale_info::TypeInfo; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; -/// Sassafras primary slot assignment pre-digest. +/// Sassafras slot assignment pre-digest. #[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct PreDigest { /// Authority index that claimed the slot. @@ -36,49 +36,47 @@ pub struct PreDigest { /// Corresponding slot number. pub slot: Slot, /// Slot claim VRF signature. - /// TODO DAVXY we can store this Signature as a Seal DigestItem pub vrf_signature: VrfSignature, /// Ticket auxiliary information for claim check. pub ticket_claim: Option, } -/// Information about the next epoch. This is broadcast in the first block -/// of the epoch. +/// Information about the next epoch. +/// +/// This is broadcast in the first block of each epoch. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { - /// The authorities. + /// Authorities list. pub authorities: Vec, - /// The value of randomness to use for the slot-assignment. + /// Epoch randomness. pub randomness: Randomness, - /// Algorithm parameters. If not present, previous epoch parameters are used. - pub config: Option, + /// Mutable epoch parameters. If not present previous epoch parameters are used. + pub config: Option, } -/// An consensus log item for BABE. +/// Consensus log item. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { - /// The epoch has changed. This provides information about the _next_ - /// epoch - information about the _current_ epoch (i.e. 
the one we've just - /// entered) should already be available earlier in the chain. + /// Provides information about the next epoch parameters. #[codec(index = 1)] NextEpochData(NextEpochDescriptor), - /// Disable the authority with given index. + /// Disable the authority with given index (TODO @davxy). #[codec(index = 2)] OnDisabled(AuthorityIndex), } -/// A digest item which is usable with Sassafras consensus. -pub trait CompatibleDigestItem: Sized { - /// Construct a digest item which contains a Sassafras pre-digest. +/// A digest item which is usable by Sassafras. +pub trait CompatibleDigestItem { + /// Construct a digest item which contains a `PreDigest`. fn sassafras_pre_digest(seal: PreDigest) -> Self; - /// If this item is an Sassafras pre-digest, return it. + /// If this item is a `PreDigest`, return it. fn as_sassafras_pre_digest(&self) -> Option; - /// Construct a digest item which contains a Sassafras seal. + /// Construct a digest item which contains an `AuthoritySignature`. fn sassafras_seal(signature: AuthoritySignature) -> Self; - /// If this item is a Sassafras signature, return the signature. + /// If this item is an `AuthoritySignature`, return it. fn as_sassafras_seal(&self) -> Option; } diff --git a/primitives/consensus/sassafras/src/inherents.rs b/primitives/consensus/sassafras/src/inherents.rs index d6254a80a16e8..70025267fa6b4 100644 --- a/primitives/consensus/sassafras/src/inherents.rs +++ b/primitives/consensus/sassafras/src/inherents.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,47 +15,49 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Inherents for Sassafras +//! Sassafras inherents structures and helpers. 
use sp_inherents::{Error, InherentData, InherentIdentifier}; use sp_std::result::Result; -/// The Sassafras inherent identifier. +/// Inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot"; -/// The type of the Sassafras inherent. +/// The type of inherent. pub type InherentType = sp_consensus_slots::Slot; -/// Auxiliary trait to extract Sassafras inherent data. +/// Auxiliary trait to extract inherent data. pub trait SassafrasInherentData { - /// Get Sassafras inherent data. - fn sassafras_inherent_data(&self) -> Result, Error>; - /// Replace Sassafras inherent data. - fn sassafras_replace_inherent_data(&mut self, new: InherentType); + /// Get inherent data. + fn sassafras_get_inherent_data(&self) -> Result, Error>; + /// Put inherent data. + fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error>; + /// Replace inherent data. + fn sassafras_replace_inherent_data(&mut self, data: &InherentType); } impl SassafrasInherentData for InherentData { - fn sassafras_inherent_data(&self) -> Result, Error> { + fn sassafras_get_inherent_data(&self) -> Result, Error> { self.get_data(&INHERENT_IDENTIFIER) } - fn sassafras_replace_inherent_data(&mut self, new: InherentType) { - self.replace_data(INHERENT_IDENTIFIER, &new); + fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error> { + self.put_data(INHERENT_IDENTIFIER, data) } -} -/// Provides the slot duration inherent data for Sassafras. -// TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 -#[cfg(feature = "std")] -pub struct InherentDataProvider { - slot: InherentType, + fn sassafras_replace_inherent_data(&mut self, data: &InherentType) { + self.replace_data(INHERENT_IDENTIFIER, data); + } } +/// Provides the slot duration inherent data. +pub struct InherentDataProvider(InherentType); + #[cfg(feature = "std")] impl InherentDataProvider { /// Create new inherent data provider from the given `slot`. 
pub fn new(slot: InherentType) -> Self { - Self { slot } + Self(slot) } /// Creates the inherent data provider by calculating the slot from the given @@ -64,14 +66,12 @@ impl InherentDataProvider { timestamp: sp_timestamp::Timestamp, slot_duration: sp_consensus_slots::SlotDuration, ) -> Self { - let slot = InherentType::from_timestamp(timestamp, slot_duration); - - Self { slot } + Self(InherentType::from_timestamp(timestamp, slot_duration)) } /// Returns the `slot` of this inherent data provider. pub fn slot(&self) -> InherentType { - self.slot + self.0 } } @@ -80,7 +80,7 @@ impl sp_std::ops::Deref for InherentDataProvider { type Target = InherentType; fn deref(&self) -> &Self::Target { - &self.slot + &self.0 } } @@ -88,15 +88,6 @@ impl sp_std::ops::Deref for InherentDataProvider { #[async_trait::async_trait] impl sp_inherents::InherentDataProvider for InherentDataProvider { async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) - } - - async fn try_handle_error( - &self, - _: &InherentIdentifier, - _: &[u8], - ) -> Option> { - // There is no error anymore - None + inherent_data.sassafras_put_inherent_data(&self.0) } } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c3df3ab675bcf..c0d3725ea7154 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Primitives for Sassafras -//! TODO-SASS-P2 : write proper docs +//! Primitives for Sassafras consensus. 
-// TODO davxy enable warnings -// #![deny(warnings)] -// #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] +#![deny(warnings)] +#![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use sp_core::crypto::KeyTypeId; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; @@ -37,14 +33,17 @@ pub use sp_core::bandersnatch::{ vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, }; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + pub mod digests; pub mod inherents; pub mod ticket; pub use ticket::{ slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id, - ticket_id_threshold, ticket_id_vrf_input, TicketBody, TicketClaim, TicketEnvelope, TicketId, - TicketSecret, + ticket_id_threshold, ticket_id_vrf_input, EphemeralPublic, EphemeralSignature, TicketBody, + TicketClaim, TicketEnvelope, TicketId, }; mod app { @@ -52,7 +51,7 @@ mod app { app_crypto!(bandersnatch, SASSAFRAS); } -/// Key type for Sassafras protocol. +/// Key type identifier. pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; /// Consensus engine identifier. @@ -61,7 +60,7 @@ pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; /// VRF output length for per-slot randomness. pub const RANDOMNESS_LENGTH: usize = 32; -/// The index of an authority. +/// Index of an authority. pub type AuthorityIndex = u32; /// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in @@ -83,54 +82,42 @@ pub type SassafrasBlockWeight = u32; /// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). pub type EquivocationProof = sp_consensus_slots::EquivocationProof; -/// Randomness required by some SASSAFRAS operations. 
+/// Randomness required by some protocol's operations. pub type Randomness = [u8; RANDOMNESS_LENGTH]; -/// Configuration data used by the Sassafras consensus engine. -#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct SassafrasConfiguration { - /// The slot duration in milliseconds. - pub slot_duration: u64, - /// The duration of epoch in slots. - pub epoch_duration: u64, - /// The authorities for the epoch. - pub authorities: Vec, - /// The randomness for the epoch. - pub randomness: Randomness, - /// Tickets threshold parameters. - pub threshold_params: SassafrasEpochConfiguration, -} - -impl SassafrasConfiguration { - /// Get the slot duration defined in the genesis configuration. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.slot_duration) - } +/// Configuration data that can be modified on epoch change. +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct EpochConfiguration { + /// Tickets threshold redundancy factor. + pub redundancy_factor: u32, + /// Tickets attempts for each validator. + pub attempts_number: u32, } /// Sassafras epoch information -#[derive(Encode, Decode, PartialEq, Eq, Clone, Debug, TypeInfo)] +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo)] pub struct Epoch { /// The epoch index. pub epoch_idx: u64, /// The starting slot of the epoch. pub start_slot: Slot, + /// Slot duration in milliseconds. + pub slot_duration: SlotDuration, + /// Duration of epoch in slots. + pub epoch_duration: u64, + /// Authorities for the epoch. + pub authorities: Vec, + /// Randomness for the epoch. + pub randomness: Randomness, /// Epoch configuration. - pub config: SassafrasConfiguration, -} - -/// Configuration data used by the Sassafras consensus engine that can be modified on epoch change. -// TODO-SASS-P3: rename to something better... 
like LotteryConfig -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct SassafrasEpochConfiguration { - /// Redundancy factor. - pub redundancy_factor: u32, - /// Number of attempts for tickets generation. - pub attempts_number: u32, + pub config: EpochConfiguration, } /// An opaque type used to represent the key ownership proof at the runtime API boundary. +/// /// The inner value is an encoded representation of the actual key ownership proof which will be /// parameterized when defining the runtime. At the runtime API boundary this type is unknown and /// as such we keep this opaque representation, implementors of the runtime API will have to make diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 2bd24a8b16a28..62624676c3df4 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,10 +23,14 @@ use scale_info::TypeInfo; use sp_consensus_slots::Slot; use sp_std::vec::Vec; +pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; + /// Ticket identifier. /// -/// Within the algorithm this is also used as a ticket score applied to bound -/// the ticket to a epoch's slot. +/// Its value is the output of a VRF whose inputs cannot be controlled by the +/// creator of the ticket (refer to [`ticket_id_vrf_input`] parameters). +/// Because of this, it is also used as the ticket score to compare against +/// the epoch ticket's threshold. pub type TicketId = u128; /// Ticket data persisted on-chain. 
@@ -34,12 +38,13 @@ pub type TicketId = u128; pub struct TicketBody { /// Attempt index. pub attempt_idx: u32, - /// Ed25519 public key which gets erased when claiming the ticket. - pub erased_public: [u8; 32], + /// Ed25519 ephemeral public key representing ticket ownersip. + /// (i.e. whoever has the secret, is the owner) + pub erased_public: EphemeralPublic, } /// Ticket ring vrf signature. -pub type TicketRingSignature = RingVrfSignature; +pub type TicketSignature = RingVrfSignature; /// Ticket envelope used on during submission. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] @@ -47,22 +52,14 @@ pub struct TicketEnvelope { /// Ticket body. pub body: TicketBody, /// Ring signature. - pub ring_signature: TicketRingSignature, -} - -/// Ticket auxiliary information used to claim the ticket ownership. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct TicketSecret { - /// Attempt index. - pub attempt_idx: u32, - /// Ed25519 used to claim ticket ownership. - pub erased_secret: [u8; 32], + pub signature: TicketSignature, } /// Ticket claim information filled by the block author. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketClaim { - pub erased_signature: [u8; 64], + /// Signature to claim ownership of `TicketBody::erased_public`. + pub erased_signature: EphemeralSignature, } fn vrf_input_from_data( @@ -108,7 +105,6 @@ pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> } /// Data to be signed via ring-vrf. -/// TODO davxy: ticket_body is not a vrf input??? 
pub fn ticket_body_sign_data(ticket_body: &TicketBody) -> VrfSignData { VrfSignData::new_unchecked( &SASSAFRAS_ENGINE_ID, diff --git a/primitives/inherents/src/client_side.rs b/primitives/inherents/src/client_side.rs index 27479de136f2d..2e23221261336 100644 --- a/primitives/inherents/src/client_side.rs +++ b/primitives/inherents/src/client_side.rs @@ -99,9 +99,11 @@ pub trait InherentDataProvider: Send + Sync { /// If the given error could not be decoded, `None` should be returned. async fn try_handle_error( &self, - identifier: &InherentIdentifier, - error: &[u8], - ) -> Option>; + _identifier: &InherentIdentifier, + _error: &[u8], + ) -> Option> { + None + } } #[impl_trait_for_tuples::impl_for_tuples(30)] diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index e1d8353a0e472..48611dccacdb3 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -144,7 +144,7 @@ impl GenesisStorageBuilder { }, sassafras: pallet_sassafras::GenesisConfig { authorities: authorities_bandersnatch.into_iter().map(|x| x.into()).collect(), - epoch_config: sp_consensus_sassafras::SassafrasEpochConfiguration { + epoch_config: sp_consensus_sassafras::EpochConfiguration { redundancy_factor: 1, attempts_number: 32, }, From 93293147f104d7a56952a380e486cebcfe598b7a Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 14 Aug 2023 10:51:10 +0200 Subject: [PATCH 56/62] Tickets were not saved --- client/consensus/sassafras/src/authorship.rs | 13 +++++++++++-- frame/sassafras/src/lib.rs | 10 +++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index f4d4e41b5ea9d..b4291bd64adb2 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -475,8 +475,17 @@ async fn start_tickets_worker( _ => None, }; - if let Some(err) = err { - error!(target: LOG_TARGET, 
"Unable to submit tickets: {}", err); + match err { + None => { + // Cache tickets secret in the epoch changes tree (TODO: @davxy use the keystre) + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|target_epoch| target_epoch.tickets_aux = epoch.tickets_aux); + }, + Some(err) => { + error!(target: LOG_TARGET, "Unable to submit tickets: {}", err); + }, } } } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 8e3c753546f6c..b65800a9e8374 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -255,11 +255,11 @@ pub mod pallet { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); - // // TODO: davxy... remove for tests - // log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); - // let ring_ctx = RingContext::new_testing(); - // log::warn!(target: LOG_TARGET, "... done"); - // RingVrfContext::::set(Some(ring_ctx.clone())); + // TODO: davxy... remove for tests + log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); + let ring_ctx = RingContext::new_testing(); + log::warn!(target: LOG_TARGET, "... done"); + RingVrfContext::::set(Some(ring_ctx.clone())); } } From f9cdfc1bf0087d127c272462e27e4da509d6c4a3 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 14 Aug 2023 13:15:07 +0200 Subject: [PATCH 57/62] Small note --- frame/sassafras/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index b65800a9e8374..677920a95bf05 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -373,6 +373,7 @@ pub mod pallet { log::debug!(target: LOG_TARGET, "... Loaded"); // TODO @davxy this should be done once per epoch and with the NEXT EPOCH AUTHORITIES!!! 
+ // For this we need the `ProofVerifier` to be serializable @svasilyev let pks: Vec<_> = Self::authorities().iter().map(|auth| *auth.as_ref()).collect(); log::debug!(target: LOG_TARGET, "Building verifier. Ring size {}", pks.len()); let verifier = ring_ctx.verifier(pks.as_slice()).unwrap(); From 85a89da8ab858b98e1a415ecd7f24bf09f07b2b1 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 14 Aug 2023 18:19:39 +0200 Subject: [PATCH 58/62] Move sassafras inherents to client. Not reason to have it as a primitive --- Cargo.lock | 3 --- bin/node-sassafras/node/src/service.rs | 16 ++++++++-------- client/consensus/sassafras/Cargo.toml | 2 +- .../consensus/sassafras/src/inherents.rs | 14 +++----------- client/consensus/sassafras/src/lib.rs | 5 +++-- client/consensus/sassafras/src/tests.rs | 7 ++----- client/consensus/sassafras/src/verification.rs | 1 + primitives/consensus/sassafras/Cargo.toml | 6 ------ primitives/consensus/sassafras/src/lib.rs | 1 - 9 files changed, 18 insertions(+), 37 deletions(-) rename {primitives => client}/consensus/sassafras/src/inherents.rs (89%) diff --git a/Cargo.lock b/Cargo.lock index 04ca9785ab104..ed177c19787d4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10884,7 +10884,6 @@ dependencies = [ name = "sp-consensus-sassafras" version = "0.3.4-dev" dependencies = [ - "async-trait", "parity-scale-codec", "scale-info", "serde", @@ -10892,10 +10891,8 @@ dependencies = [ "sp-application-crypto", "sp-consensus-slots", "sp-core", - "sp-inherents", "sp-runtime", "sp-std", - "sp-timestamp", ] [[package]] diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index e2e0252dcdb47..0ab4b9c041912 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -122,13 +122,10 @@ pub fn new_partial( select_chain.clone(), move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - 
sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - + let slot = sc_consensus_sassafras::InherentDataProvider::from_timestamp( + *timestamp, + slot_duration, + ); Ok((slot, timestamp)) }, &task_manager.spawn_essential_handle(), @@ -266,7 +263,10 @@ pub fn new_full(config: Configuration) -> Result { force_authoring, create_inherent_data_providers: move |_, _| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = sp_consensus_sassafras::inherents::InherentDataProvider::from_timestamp_and_slot_duration(*timestamp, slot_duration); + let slot = sc_consensus_sassafras::InherentDataProvider::from_timestamp( + *timestamp, + slot_duration, + ); Ok((slot, timestamp)) }, offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index b0a6878e12967..2ecd08ccd2a4b 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -40,6 +40,7 @@ sp-core = { version = "21.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } env_logger = "0.10.0" [dev-dependencies] @@ -47,6 +48,5 @@ sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } -sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } substrate-test-runtime-client = { version = "2.0.0", path = 
"../../../test-utils/runtime/client" } tokio = "1.22.0" diff --git a/primitives/consensus/sassafras/src/inherents.rs b/client/consensus/sassafras/src/inherents.rs similarity index 89% rename from primitives/consensus/sassafras/src/inherents.rs rename to client/consensus/sassafras/src/inherents.rs index 70025267fa6b4..372a7a85eedf2 100644 --- a/primitives/consensus/sassafras/src/inherents.rs +++ b/client/consensus/sassafras/src/inherents.rs @@ -18,7 +18,7 @@ //! Sassafras inherents structures and helpers. use sp_inherents::{Error, InherentData, InherentIdentifier}; -use sp_std::result::Result; +use std::ops::Deref; /// Inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot"; @@ -53,7 +53,6 @@ impl SassafrasInherentData for InherentData { /// Provides the slot duration inherent data. pub struct InherentDataProvider(InherentType); -#[cfg(feature = "std")] impl InherentDataProvider { /// Create new inherent data provider from the given `slot`. pub fn new(slot: InherentType) -> Self { @@ -62,21 +61,15 @@ impl InherentDataProvider { /// Creates the inherent data provider by calculating the slot from the given /// `timestamp` and `duration`. - pub fn from_timestamp_and_slot_duration( + pub fn from_timestamp( timestamp: sp_timestamp::Timestamp, slot_duration: sp_consensus_slots::SlotDuration, ) -> Self { Self(InherentType::from_timestamp(timestamp, slot_duration)) } - - /// Returns the `slot` of this inherent data provider. 
- pub fn slot(&self) -> InherentType { - self.0 - } } -#[cfg(feature = "std")] -impl sp_std::ops::Deref for InherentDataProvider { +impl Deref for InherentDataProvider { type Target = InherentType; fn deref(&self) -> &Self::Target { @@ -84,7 +77,6 @@ impl sp_std::ops::Deref for InherentDataProvider { } } -#[cfg(feature = "std")] #[async_trait::async_trait] impl sp_inherents::InherentDataProvider for InherentDataProvider { async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index e3dfbce023ef0..46999c40ab83c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -65,7 +65,7 @@ use sp_consensus::{ }; use sp_consensus_slots::Slot; use sp_core::Pair; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider as _}; use sp_keystore::KeystorePtr; use sp_runtime::{ generic::OpaqueDigestItemId, @@ -76,7 +76,6 @@ use sp_runtime::{ // Re-export some primitives. 
pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, - inherents::SassafrasInherentData, slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id_vrf_input, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, @@ -86,6 +85,7 @@ pub use sp_consensus_sassafras::{ mod authorship; mod aux_schema; mod block_import; +mod inherents; #[cfg(test)] mod tests; mod verification; @@ -94,6 +94,7 @@ mod verification; pub use authorship::{start_sassafras, SassafrasWorker, SassafrasWorkerParams}; pub use aux_schema::revert; pub use block_import::{block_import, SassafrasBlockImport}; +pub use inherents::{InherentDataProvider, InherentType}; pub use verification::SassafrasVerifier; const LOG_TARGET: &str = "sassafras 🌳"; diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 23526c007fcb9..96f81a38fc042 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -35,7 +35,7 @@ use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool}; use sp_application_crypto::key_types::SASSAFRAS; use sp_blockchain::Error as TestError; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; -use sp_consensus_sassafras::{inherents::InherentDataProvider, EphemeralPublic, SlotDuration}; +use sp_consensus_sassafras::{EphemeralPublic, SlotDuration}; use sp_core::crypto::UncheckedFrom; use sp_keyring::BandersnatchKeyring as Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -146,10 +146,7 @@ fn create_test_verifier( ) -> SassafrasVerifier { let slot_duration = config.slot_duration; let create_inherent_data_providers = Box::new(move |_, _| async move { - let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - slot_duration, - ); + 
let slot = InherentDataProvider::from_timestamp(Timestamp::current(), slot_duration); Ok((slot,)) }); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 29abffe591a15..3d7f0c8659366 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -19,6 +19,7 @@ //! Types and functions related to block verification. use super::*; +use crate::inherents::SassafrasInherentData; use sp_core::{ crypto::{VrfPublic, Wraps}, ed25519::Pair as EphemeralPair, diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 8d687aabee00b..14de323c73617 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -15,7 +15,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.163", default-features = false, features = ["derive"], optional = true } @@ -23,15 +22,12 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } sp-core = { version = "21.0.0", default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } -sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } sp-std = { version = "8.0.0", default-features = false, path = "../../std" } -sp-timestamp = { version = "4.0.0-dev", optional = 
true, path = "../../timestamp" } [features] default = ["std"] std = [ - "async-trait", "scale-codec/std", "scale-info/std", "serde/std", @@ -39,10 +35,8 @@ std = [ "sp-application-crypto/std", "sp-consensus-slots/std", "sp-core/std", - "sp-inherents/std", "sp-runtime/std", "sp-std/std", - "sp-timestamp", ] # Serde support without relying on std features. diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c0d3725ea7154..c900ea87d09ff 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -37,7 +37,6 @@ pub use sp_core::bandersnatch::{ use serde::{Deserialize, Serialize}; pub mod digests; -pub mod inherents; pub mod ticket; pub use ticket::{ From 3eb0d428ad7803597cf04a3cc5d60d570432f885 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 16 Aug 2023 17:31:07 +0200 Subject: [PATCH 59/62] Improve ticket claiming mechanism --- client/consensus/sassafras/src/authorship.rs | 92 +++++++++++------ .../consensus/sassafras/src/block_import.rs | 2 +- client/consensus/sassafras/src/lib.rs | 13 ++- client/consensus/sassafras/src/tests.rs | 16 ++- .../consensus/sassafras/src/verification.rs | 67 ++++++++----- frame/sassafras/src/lib.rs | 72 +++++++------- frame/sassafras/src/mock.rs | 20 ++-- primitives/consensus/sassafras/src/lib.rs | 6 +- primitives/consensus/sassafras/src/ticket.rs | 71 +------------ primitives/consensus/sassafras/src/vrf.rs | 99 +++++++++++++++++++ primitives/core/src/bandersnatch.rs | 2 +- 11 files changed, 275 insertions(+), 185 deletions(-) create mode 100644 primitives/consensus/sassafras/src/vrf.rs diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index b4291bd64adb2..8bb5fe95cc989 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -22,8 +22,8 @@ use super::*; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use 
sp_consensus_sassafras::{ - digests::PreDigest, slot_claim_sign_data, ticket_id, ticket_id_threshold, AuthorityId, Slot, - TicketBody, TicketClaim, TicketEnvelope, TicketId, + digests::PreDigest, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, + TicketEnvelope, TicketId, }; use sp_core::{ bandersnatch::ring_vrf::RingContext, ed25519::Pair as EphemeralPair, twox_64, ByteArray, @@ -48,21 +48,37 @@ pub(crate) fn claim_slot( return None } - let mut vrf_sign_data = slot_claim_sign_data(&epoch.randomness, slot, epoch.epoch_idx); + let mut vrf_sign_data = vrf::slot_claim_sign_data(&epoch.randomness, slot, epoch.epoch_idx); let (authority_idx, ticket_claim) = match maybe_ticket { - Some((ticket_id, ticket_data)) => { - log::debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:016x})]"); + Some((ticket_id, ticket_body)) => { + debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:016x})]"); + + // TODO @davxy... this is annoying. + // If we lose the secret cache then to know if we are the ticket owner then looks + // like we need to regenerate the ticket-id using all our keys and check if the + // output matches with the onchain one... + // Is there a better way??? 
let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?; - log::debug!( + debug!( target: LOG_TARGET, " got ticket: auth: {}, attempt: {}", authority_idx, - ticket_data.attempt_idx + ticket_body.attempt_idx ); - vrf_sign_data.push_transcript_data(&ticket_data.encode()); + vrf_sign_data.push_transcript_data(&ticket_body.encode()); + let reveal_vrf_input = vrf::revealed_key_input( + &epoch.randomness, + ticket_body.attempt_idx, + epoch.epoch_idx, + ); + vrf_sign_data + .push_vrf_input(reveal_vrf_input) + .expect("Sign data has enough space; qed"); + + // Sign some data using the erased key to enforce our ownership let data = vrf_sign_data.challenge::<32>(); let erased_pair = EphemeralPair::from_seed(&ticket_secret.seed); let erased_signature = erased_pair.sign(&data); @@ -71,7 +87,7 @@ pub(crate) fn claim_slot( (authority_idx, Some(claim)) }, None => { - log::debug!(target: LOG_TARGET, "[TRY SECONDARY (slot {slot})]"); + debug!(target: LOG_TARGET, "[TRY SECONDARY (slot {slot})]"); (secondary_authority_index(slot, epoch), None) }, }; @@ -83,6 +99,10 @@ pub(crate) fn claim_slot( .ok() .flatten()?; + if let Some(output) = vrf_signature.outputs.get(1) { + warn!(target: LOG_TARGET, "{:?}", output); + } + let pre_digest = PreDigest { authority_idx, slot, vrf_signature, ticket_claim }; Some((pre_digest, authority_id.clone())) @@ -106,13 +126,14 @@ fn generate_epoch_tickets( epoch.authorities.len() as u32, ); // TODO-SASS-P4 remove me - log::debug!(target: LOG_TARGET, "Generating tickets for epoch {} @ slot {}", epoch.epoch_idx, epoch.start_slot); - log::debug!(target: LOG_TARGET, " threshold: {threshold:016x}"); + debug!(target: LOG_TARGET, "Generating tickets for epoch {} @ slot {}", epoch.epoch_idx, epoch.start_slot); + debug!(target: LOG_TARGET, " threshold: {threshold:016x}"); // We need a list of raw unwrapped keys let pks: Vec<_> = epoch.authorities.iter().map(|a| *a.as_ref()).collect(); - let mut tickets_aux = Vec::new(); + let tickets_aux = &mut 
epoch.tickets_aux; + let epoch = &epoch.inner; for (authority_idx, authority_id) in epoch.authorities.iter().enumerate() { if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { @@ -124,27 +145,39 @@ fn generate_epoch_tickets( debug!(target: LOG_TARGET, ">>> ...done"); let make_ticket = |attempt_idx| { - let vrf_input = ticket_id_vrf_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); - - let vrf_preout = keystore - .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &vrf_input) + // Ticket id and threshold check. + let ticket_id_input = + vrf::ticket_id_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); + let ticket_id_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &ticket_id_input) .ok()??; - - let ticket_id = ticket_id(&vrf_input, &vrf_preout); + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); if ticket_id >= threshold { return None } - // @davxy TODO: why not generate from seed. - // Seed computed as f(pair.seed || ticket_id) + // Erased key. + // TODO: @davxy maybe we can we make this as: + // part1 = OsRand() // stored in memory + // part2 = make_erased_seed(&seed_vrf_input, seed_vrf_output) // reproducible from auth + // erased_seed = hash(part1 ++ part2) + // In this way is not reproducible and not full secret is in memory let (erased_pair, erased_seed) = EphemeralPair::generate(); - let erased_public = erased_pair.public(); - let body = TicketBody { attempt_idx, erased_public }; + + // Revealed key. 
+ let revealed_input = + vrf::revealed_key_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); + let revealed_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &revealed_input) + .ok()??; + let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output); + let revealed_public = EphemeralPair::from_seed(&revealed_seed).public(); + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; debug!(target: LOG_TARGET, ">>> Creating ring proof for attempt {}", attempt_idx); - let mut sign_data = ticket_body_sign_data(&body); - sign_data.push_vrf_input(vrf_input).expect("Can't fail"); + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); let signature = keystore .bandersnatch_ring_vrf_sign( @@ -156,25 +189,22 @@ fn generate_epoch_tickets( .ok()??; debug!(target: LOG_TARGET, ">>> ...done"); - let ticket_envelope = TicketEnvelope { body, signature }; + debug_assert_eq!(ticket_id_output, signature.outputs[0]); + let ticket_envelope = TicketEnvelope { body, signature }; let ticket_secret = TicketSecret { attempt_idx, seed: erased_seed }; - Some((ticket_id, ticket_envelope, ticket_secret)) }; for attempt in 0..epoch.config.attempts_number { if let Some((ticket_id, ticket_envelope, ticket_secret)) = make_ticket(attempt) { - log::debug!(target: LOG_TARGET, " → {ticket_id:016x}"); + debug!(target: LOG_TARGET, " → {ticket_id:016x}"); tickets.push(ticket_envelope); - tickets_aux.push((ticket_id, authority_idx as u32, ticket_secret)); + tickets_aux.insert(ticket_id, (authority_idx as u32, ticket_secret)); } } } - tickets_aux.into_iter().for_each(|(ticket_id, authority_idx, ticket_secret)| { - epoch.tickets_aux.insert(ticket_id, (authority_idx, ticket_secret)); - }); tickets } diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index c11daf58ba3f5..5efc6e3f9b220 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ 
b/client/consensus/sassafras/src/block_import.rs @@ -170,7 +170,7 @@ where epoch_data.epoch_idx += skipped_epochs; epoch_data.start_slot = Slot::from(*epoch_data.start_slot + skipped_epochs * epoch_data.epoch_duration); - log::warn!( + warn!( target: LOG_TARGET, "Epoch(s) skipped from {} to {}", original_epoch_idx, diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 46999c40ab83c..4c03e201ca957 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -76,8 +76,7 @@ use sp_runtime::{ // Re-export some primitives. pub use sp_consensus_sassafras::{ digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, - slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id_vrf_input, - AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, + vrf, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; @@ -141,6 +140,12 @@ pub enum Error { /// VRF verification failed #[error("VRF verification failed")] VrfVerificationFailed, + /// Missing VRF output entry in the signature + #[error("Missing signed VRF output")] + MissingSignedVrfOutput, + /// Mismatch during verification of reveal public + #[error("Reveal public mismatch")] + RevealPublicMismatch, /// Unexpected authoring mechanism #[error("Unexpected authoring mechanism")] UnexpectedAuthoringMechanism, @@ -293,10 +298,10 @@ pub struct SassafrasIntermediate { fn find_pre_digest(header: &B::Header) -> Result> { if header.number().is_zero() { // Genesis block doesn't contain a pre digest so let's generate a - // dummy one to not break any invariants in the rest of the code + // dummy one to not break any invariant in the rest of the code. 
use sp_core::crypto::VrfSecret; let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); - let data = sp_consensus_sassafras::slot_claim_sign_data(&Default::default(), 0.into(), 0); + let data = vrf::slot_claim_sign_data(&Default::default(), 0.into(), 0); let vrf_signature = pair.as_ref().vrf_sign(&data); return Ok(PreDigest { authority_idx: 0, slot: 0.into(), ticket_claim: None, vrf_signature }) } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 96f81a38fc042..8f8fe89b086a8 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -303,10 +303,13 @@ impl TestContext { // TODO DAVXY: here maybe we can use the epoch.randomness??? let epoch = self.epoch_data(&parent_hash, parent_number, slot); - let data = - slot_claim_sign_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); - let vrf_signature = - self.keystore.bandersnatch_vrf_sign(SASSAFRAS, &public, &data).unwrap().unwrap(); + let sign_data = + vrf::slot_claim_sign_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let vrf_signature = self + .keystore + .bandersnatch_vrf_sign(SASSAFRAS, &public, &sign_data) + .unwrap() + .unwrap(); let pre_digest = PreDigest { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; let digest = sp_runtime::generic::Digest { @@ -363,6 +366,8 @@ impl TestContext { fn tests_assumptions_sanity_check() { let env = TestContext::new(); assert_eq!(env.link.genesis_config, create_test_epoch()); + // Protocol needs at least two VRF ios + assert!(sp_core::bandersnatch::vrf::MAX_VRF_IOS >= 2); } #[test] @@ -415,7 +420,8 @@ fn claim_primary_slots_works() { let ticket_id = 123; let erased_public = EphemeralPublic::unchecked_from([0; 32]); - let ticket_body = TicketBody { attempt_idx: 0, erased_public }; + let revealed_public = erased_public.clone(); + let ticket_body = TicketBody { attempt_idx: 0, erased_public, revealed_public }; let 
ticket_secret = TicketSecret { attempt_idx: 0, seed: [0; 32] }; // Fail if we have authority key in our keystore but not ticket aux data diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 3d7f0c8659366..ff2d026a38829 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -95,51 +95,66 @@ fn check_header( // Optionally check ticket ownership - let mut vrf_sign_data = - slot_claim_sign_data(&epoch.randomness, pre_digest.slot, epoch.epoch_idx); + let mut sign_data = + vrf::slot_claim_sign_data(&epoch.randomness, pre_digest.slot, epoch.epoch_idx); match (&maybe_ticket, &pre_digest.ticket_claim) { - (Some((_ticket_id, ticket_data)), Some(ticket_claim)) => { - log::debug!(target: LOG_TARGET, "checking primary"); - - vrf_sign_data.push_transcript_data(&ticket_data.encode()); - let challenge = vrf_sign_data.challenge::<32>(); - - if !EphemeralPair::verify( - &ticket_claim.erased_signature, - &challenge, - &ticket_data.erased_public, - ) { - return Err(sassafras_err(Error::BadSignature(pre_hash))) + (Some((_ticket_id, ticket_body)), ticket_claim) => { + debug!(target: LOG_TARGET, "checking primary"); + + sign_data.push_transcript_data(&ticket_body.encode()); + + // Revealed key check + let revealed_input = vrf::revealed_key_input( + &epoch.randomness, + ticket_body.attempt_idx, + epoch.epoch_idx, + ); + let revealed_output = pre_digest + .vrf_signature + .outputs + .get(1) + .ok_or_else(|| sassafras_err(Error::MissingSignedVrfOutput))?; + let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output); + let revealed_public = EphemeralPair::from_seed(&revealed_seed).public(); + if revealed_public != ticket_body.revealed_public { + return Err(sassafras_err(Error::RevealPublicMismatch)) + } + sign_data.push_vrf_input(revealed_input).expect("Can't fail; qed"); + + if let Some(ticket_claim) = ticket_claim { + // Optional check, 
increases some score... + let challenge = sign_data.challenge::<32>(); + if !EphemeralPair::verify( + &ticket_claim.erased_signature, + &challenge, + &ticket_body.erased_public, + ) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) + } } }, (None, None) => { - log::debug!(target: LOG_TARGET, "checking secondary"); + debug!(target: LOG_TARGET, "checking secondary"); let idx = authorship::secondary_authority_index(pre_digest.slot, epoch); if idx != pre_digest.authority_idx { - log::error!(target: LOG_TARGET, "Bad secondary authority index"); + error!(target: LOG_TARGET, "Bad secondary authority index"); return Err(Error::SlotAuthorNotFound) } }, - (Some(_), None) => { - log::warn!(target: LOG_TARGET, "Unexpected secondary authoring mechanism"); - return Err(Error::UnexpectedAuthoringMechanism) - }, (None, Some(_)) => if origin != BlockOrigin::NetworkInitialSync { - log::warn!(target: LOG_TARGET, "Unexpected primary authoring mechanism"); + warn!(target: LOG_TARGET, "Unexpected primary authoring mechanism"); return Err(Error::UnexpectedAuthoringMechanism) }, } // Check per-slot vrf proof - - if !authority_id - .as_inner_ref() - .vrf_verify(&vrf_sign_data, &pre_digest.vrf_signature) - { + if !authority_id.as_inner_ref().vrf_verify(&sign_data, &pre_digest.vrf_signature) { + warn!(target: LOG_TARGET, ">>> VERIFICATION FAILED (pri = {})!!!", maybe_ticket.is_some()); return Err(sassafras_err(Error::VrfVerificationFailed)) } + warn!(target: LOG_TARGET, ">>> VERIFICATION OK (pri = {})!!!", maybe_ticket.is_some()); let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal }; diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 677920a95bf05..585a21ee3ada2 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -47,6 +47,7 @@ #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +use log::{debug, error, warn}; use scale_codec::{Decode, 
Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -57,7 +58,7 @@ use frame_system::{ }; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, RingContext, Slot, + vrf, AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, RingContext, Slot, SlotDuration, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; @@ -255,10 +256,10 @@ pub mod pallet { Pallet::::initialize_genesis_authorities(&self.authorities); EpochConfig::::put(self.epoch_config.clone()); - // TODO: davxy... remove for tests - log::warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); + // TODO: davxy... remove for pallet tests + warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); let ring_ctx = RingContext::new_testing(); - log::warn!(target: LOG_TARGET, "... done"); + warn!(target: LOG_TARGET, "... done"); RingVrfContext::::set(Some(ring_ctx.clone())); } } @@ -293,7 +294,7 @@ pub mod pallet { // On the first non-zero block (i.e. block #1) this is where the first epoch // (epoch #0) actually starts. We need to adjust internal storage accordingly. 
if *GenesisSlot::::get() == 0 { - log::debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", pre_digest.slot); + debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", pre_digest.slot); Self::initialize_genesis_epoch(pre_digest.slot) } @@ -316,18 +317,18 @@ pub mod pallet { let pre_digest = Initialized::::take() .expect("Finalization is called after initialization; qed."); - let vrf_input = sp_consensus_sassafras::slot_claim_vrf_input( + let claim_input = vrf::slot_claim_input( &Self::randomness(), CurrentSlot::::get(), EpochIndex::::get(), ); - - let randomness = pre_digest + let claim_output = pre_digest .vrf_signature .outputs .get(0) - .expect("vrf preout should have been already checked by the client; qed") - .make_bytes::(RANDOMNESS_VRF_CONTEXT, &vrf_input); + .expect("Presence should have been already checked by the client; qed"); + let randomness = + claim_output.make_bytes::(RANDOMNESS_VRF_CONTEXT, &claim_input); Self::deposit_randomness(&randomness); @@ -364,20 +365,20 @@ pub mod pallet { ) -> DispatchResult { ensure_none(origin)?; - log::debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); - log::debug!(target: LOG_TARGET, "LOADING RING CTX"); + debug!(target: LOG_TARGET, "LOADING RING CTX"); let Some(ring_ctx) = RingVrfContext::::get() else { return Err("Ring context not initialized".into()) }; - log::debug!(target: LOG_TARGET, "... Loaded"); + debug!(target: LOG_TARGET, "... Loaded"); // TODO @davxy this should be done once per epoch and with the NEXT EPOCH AUTHORITIES!!! // For this we need the `ProofVerifier` to be serializable @svasilyev let pks: Vec<_> = Self::authorities().iter().map(|auth| *auth.as_ref()).collect(); - log::debug!(target: LOG_TARGET, "Building verifier. Ring size {}", pks.len()); + debug!(target: LOG_TARGET, "Building verifier. Ring size {}", pks.len()); let verifier = ring_ctx.verifier(pks.as_slice()).unwrap(); - log::debug!(target: LOG_TARGET, "... 
Built"); + debug!(target: LOG_TARGET, "... Built"); // Check tickets score let next_auth = NextAuthorities::::get(); @@ -397,26 +398,21 @@ pub mod pallet { let mut segment = BoundedVec::with_max_capacity(); for ticket in tickets { - log::debug!(target: LOG_TARGET, "Checking ring proof"); - - let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input( - &randomness, - ticket.body.attempt_idx, - epoch_idx, - ); + debug!(target: LOG_TARGET, "Checking ring proof"); - let Some(vrf_preout) = ticket.signature.outputs.get(0) else { - log::debug!(target: LOG_TARGET, "Missing ticket pre-output from ring signature"); + let ticket_id_input = + vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + let Some(ticket_id_output) = ticket.signature.outputs.get(0) else { + debug!(target: LOG_TARGET, "Missing ticket vrf output from ring signature"); continue }; - let ticket_id = sp_consensus_sassafras::ticket_id(&vrf_input, &vrf_preout); + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); if ticket_id >= ticket_threshold { - log::debug!(target: LOG_TARGET, "Over threshold"); + debug!(target: LOG_TARGET, "Over threshold"); continue } - let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&ticket.body); - sign_data.push_vrf_input(vrf_input).expect("Can't fail"); + let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); if ticket.signature.verify(&sign_data, &verifier) { TicketsData::::set(ticket_id, Some(ticket.body)); @@ -424,13 +420,13 @@ pub mod pallet { .try_push(ticket_id) .expect("has same length as bounded input vector; qed"); } else { - log::debug!(target: LOG_TARGET, "Proof verification failure"); + debug!(target: LOG_TARGET, "Proof verification failure"); } } if !segment.is_empty() { - log::debug!(target: LOG_TARGET, "Appending segment with {} tickets", segment.len()); - segment.iter().for_each(|t| log::debug!(target: LOG_TARGET, " + {t:16x}")); + debug!(target: LOG_TARGET, "Appending segment with {} 
tickets", segment.len()); + segment.iter().for_each(|t| debug!(target: LOG_TARGET, " + {t:16x}")); let mut metadata = TicketsMeta::::get(); NextTicketsSegments::::insert(metadata.segments_count, segment); metadata.segments_count += 1; @@ -500,7 +496,7 @@ pub mod pallet { fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_tickets { tickets } = call { // Discard tickets not coming from the local node - log::debug!( + debug!( target: LOG_TARGET, "Validating unsigned from {} source", match source { @@ -520,7 +516,7 @@ pub mod pallet { // A) The current epoch validators // B) The next epoch validators // C) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) - log::warn!( + warn!( target: LOG_TARGET, "Rejecting unsigned transaction from external sources.", ); @@ -532,7 +528,7 @@ pub mod pallet { let current_slot_idx = Self::current_slot_index(); if current_slot_idx >= epoch_duration / 2 { - log::warn!(target: LOG_TARGET, "Timeout to propose tickets, bailing out.",); + warn!(target: LOG_TARGET, "Timeout to propose tickets, bailing out.",); return InvalidTransaction::Stale.into() } @@ -676,7 +672,7 @@ impl Pallet { Self::reset_tickets_data(); let skipped_epochs = u64::from(slot_idx) / T::EpochDuration::get(); epoch_idx += skipped_epochs; - log::warn!(target: LOG_TARGET, "Detected {} skipped epochs, resuming from epoch {}", skipped_epochs, epoch_idx); + warn!(target: LOG_TARGET, "Detected {} skipped epochs, resuming from epoch {}", skipped_epochs, epoch_idx); } let mut tickets_metadata = TicketsMeta::::get(); @@ -872,7 +868,7 @@ impl Pallet { } else { 2 * (duration - (slot_idx + 1)) }; - log::debug!( + debug!( target: LOG_TARGET, ">>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", slot_idx, @@ -986,7 +982,7 @@ impl Pallet { match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(_) => true, Err(e) => { - log::error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); + 
error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); false }, } @@ -1007,7 +1003,7 @@ impl Pallet { match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(()) => true, Err(e) => { - log::error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e); + error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e); false }, } diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index ee234749bf9dc..d19a5cdebacd9 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -164,13 +164,12 @@ fn make_ticket_with_prover( let mut raw: [u8; 32] = [0; 32]; raw.copy_from_slice(&pair.public().as_slice()[0..32]); let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public.clone(); - let body = TicketBody { attempt_idx: attempt, erased_public }; + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); - let mut sign_data = sp_consensus_sassafras::ticket_body_sign_data(&body); - - let vrf_input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt, epoch); - sign_data.push_vrf_input(vrf_input).unwrap(); + let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); @@ -219,10 +218,10 @@ pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, Ti let epoch = Sassafras::epoch_index() + 1; let randomness = Sassafras::next_randomness(); - let input = sp_consensus_sassafras::ticket_id_vrf_input(&randomness, attempt_idx, epoch); - let output = pair.as_inner_ref().vrf_output(&input); + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_output = pair.as_inner_ref().vrf_output(&ticket_id_input); - let id = sp_consensus_sassafras::ticket_id(&input, &output); + let id = vrf::make_ticket_id(&ticket_id_input, 
&ticket_id_output); // Make a dummy ephemeral public that hopefully is unique within one test instance. // In the tests, the values within the erased public are just used to compare @@ -231,8 +230,9 @@ pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, Ti raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); raw[16..].copy_from_slice(&id.to_le_bytes()); let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public.clone(); - let body = TicketBody { attempt_idx, erased_public }; + let body = TicketBody { attempt_idx, erased_public, revealed_public }; (id, body) } @@ -292,7 +292,7 @@ fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { randomness = crate::NextRandomness::::get(); } - let data = sp_consensus_sassafras::slot_claim_sign_data(&randomness, slot, epoch); + let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); pair.as_ref().vrf_sign(&data) } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c900ea87d09ff..ef4a1f2606a06 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -38,11 +38,11 @@ use serde::{Deserialize, Serialize}; pub mod digests; pub mod ticket; +pub mod vrf; pub use ticket::{ - slot_claim_sign_data, slot_claim_vrf_input, ticket_body_sign_data, ticket_id, - ticket_id_threshold, ticket_id_vrf_input, EphemeralPublic, EphemeralSignature, TicketBody, - TicketClaim, TicketEnvelope, TicketId, + ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketClaim, + TicketEnvelope, TicketId, }; mod app { diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 62624676c3df4..28ae572b72d71 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -17,11 +17,9 @@ //! Primitives related to tickets. 
-use crate::{Randomness, RingVrfSignature, VrfInput, VrfOutput, VrfSignData, SASSAFRAS_ENGINE_ID}; +use crate::RingVrfSignature; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_consensus_slots::Slot; -use sp_std::vec::Vec; pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; @@ -38,9 +36,10 @@ pub type TicketId = u128; pub struct TicketBody { /// Attempt index. pub attempt_idx: u32, - /// Ed25519 ephemeral public key representing ticket ownersip. - /// (i.e. whoever has the secret, is the owner) + /// Ephemeral public key which gets erased when the ticket is claimed. pub erased_public: EphemeralPublic, + /// Ephemeral public key which gets exposed when the ticket is claimed. + pub revealed_public: EphemeralPublic, } /// Ticket ring vrf signature. @@ -58,70 +57,10 @@ pub struct TicketEnvelope { /// Ticket claim information filled by the block author. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct TicketClaim { - /// Signature to claim ownership of `TicketBody::erased_public`. + /// Signature verified via `TicketBody::erased_public`. pub erased_signature: EphemeralSignature, } -fn vrf_input_from_data( - domain: &[u8], - data: impl IntoIterator>, -) -> VrfInput { - let raw = data.into_iter().fold(Vec::new(), |mut v, e| { - let bytes = e.as_ref(); - v.extend_from_slice(bytes); - let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed"); - v.extend_from_slice(&len.to_le_bytes()); - v - }); - VrfInput::new(domain, raw) -} - -/// VRF input to claim slot ownership during block production. -/// -/// Input randomness is current epoch randomness. -pub fn slot_claim_vrf_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { - vrf_input_from_data( - b"sassafras-claim-v1.0", - [randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()], - ) -} - -/// Signing-data to claim slot ownership during block production. 
-/// -/// Input randomness is current epoch randomness. -pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { - let vrf_input = slot_claim_vrf_input(randomness, slot, epoch); - VrfSignData::new_unchecked(&SASSAFRAS_ENGINE_ID, Some("slot-claim-transcript"), Some(vrf_input)) -} - -/// VRF input to generate the ticket id. -/// -/// Input randomness is current epoch randomness. -pub fn ticket_id_vrf_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { - vrf_input_from_data( - b"sassafras-ticket-v1.0", - [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], - ) -} - -/// Data to be signed via ring-vrf. -pub fn ticket_body_sign_data(ticket_body: &TicketBody) -> VrfSignData { - VrfSignData::new_unchecked( - &SASSAFRAS_ENGINE_ID, - &[b"ticket-body-transcript", ticket_body.encode().as_slice()], - [], - ) -} - -/// Get ticket-id for a given vrf input and output. -/// -/// Input generally obtained via `ticket_id_vrf_input`. -/// Output can be obtained directly using the vrf secret key or from the signature. -pub fn ticket_id(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> TicketId { - let bytes = vrf_output.make_bytes::<16>(b"vrf-out", vrf_input); - u128::from_le_bytes(bytes) -} - /// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: /// - x: redundancy factor; /// - s: number of slots in epoch; diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..e32e739a53c87 --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,99 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities related to VRF input, output and signatures. + +use crate::{ + Randomness, TicketBody, TicketId, VrfInput, VrfOutput, VrfSignData, SASSAFRAS_ENGINE_ID, +}; +use scale_codec::Encode; +use sp_consensus_slots::Slot; +use sp_std::vec::Vec; + +pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; + +fn vrf_input_from_data( + domain: &[u8], + data: impl IntoIterator>, +) -> VrfInput { + let raw = data.into_iter().fold(Vec::new(), |mut v, e| { + let bytes = e.as_ref(); + v.extend_from_slice(bytes); + let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed"); + v.extend_from_slice(&len.to_le_bytes()); + v + }); + VrfInput::new(domain, raw) +} + +/// VRF input to claim slot ownership during block production. +pub fn slot_claim_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-claim-v1.0", + [randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// Signing-data to claim slot ownership during block production. +pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { + let vrf_input = slot_claim_input(randomness, slot, epoch); + VrfSignData::new_unchecked(&SASSAFRAS_ENGINE_ID, Some("slot-claim-transcript"), Some(vrf_input)) +} + +/// VRF input to generate the ticket id. 
+pub fn ticket_id_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-ticket-v1.0", + [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// VRF input to generate the revealed key. +pub fn revealed_key_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-revealed-v1.0", + [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// Data to be signed via ring-vrf. +pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput) -> VrfSignData { + VrfSignData::new_unchecked( + &SASSAFRAS_ENGINE_ID, + &[b"ticket-body-transcript", ticket_body.encode().as_slice()], + Some(ticket_id_input), + ) +} + +/// Make ticket-id from the given VRF input and output. +/// +/// Input should have been obtained via [`ticket_id_input`]. +/// Output should have been obtained from the input directly using the vrf secret key +/// or from the vrf signature outputs. +pub fn make_ticket_id(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> TicketId { + let bytes = vrf_output.make_bytes::<16>(b"ticket-id", vrf_input); + u128::from_le_bytes(bytes) +} + +/// Make revealed key seed from a given VRF input and output. +/// +/// Input should have been obtained via [`revealed_key_input`]. +/// Output should have been obtained from the input directly using the vrf secret key +/// or from the vrf signature outputs. +pub fn make_revealed_key_seed(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> [u8; 32] { + vrf_output.make_bytes::<32>(b"revealed-seed", vrf_input) +} diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index d87b3ee232df9..3a58bc48fc889 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -212,7 +212,7 @@ impl sp_std::fmt::Debug for Signature { /// The raw secret seed, which can be used to reconstruct the secret [`Pair`].
#[cfg(feature = "full_crypto")] -type Seed = [u8; SEED_SERIALIZED_LEN]; +pub type Seed = [u8; SEED_SERIALIZED_LEN]; /// Bandersnatch secret key. #[cfg(feature = "full_crypto")] From 4ff716420e7a0d33d4be4964b71df7e6675e0c1e Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 18 Aug 2023 09:46:57 +0200 Subject: [PATCH 60/62] Fix after master merge --- Cargo.lock | 155 ++++++++++++++++++ bin/node-sassafras/node/src/service.rs | 2 +- client/consensus/sassafras/src/authorship.rs | 18 +- .../consensus/sassafras/src/block_import.rs | 9 +- client/consensus/sassafras/src/lib.rs | 10 +- .../consensus/sassafras/src/verification.rs | 4 +- frame/sassafras/Cargo.toml | 2 +- 7 files changed, 171 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cca0a018ca72..e8cf7f6af2d68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5716,6 +5716,84 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.4-dev" +dependencies = [ + "clap 4.3.2", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-offchain", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.4-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + 
"frame-system-rpc-runtime-api", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -7408,6 +7486,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.4-dev" +dependencies = [ + "array-bytes", + "env_logger 0.10.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -9668,6 +9767,45 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.4-dev" +dependencies = [ + "async-trait", + "env_logger 0.10.0", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "sc-transaction-pool-api", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -11302,6 +11440,21 @@ dependencies = [ "sp-std", ] +[[package]] +name = 
"sp-consensus-sassafras" +version = "0.3.4-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -12266,6 +12419,7 @@ dependencies = [ "log", "pallet-babe", "pallet-balances", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -12282,6 +12436,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-genesis-builder", diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs index 0ab4b9c041912..0dfc72edb9fcf 100644 --- a/bin/node-sassafras/node/src/service.rs +++ b/bin/node-sassafras/node/src/service.rs @@ -49,7 +49,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sc_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( sc_consensus_sassafras::SassafrasBlockImport, diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 8bb5fe95cc989..52386c4715366 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -229,8 +229,8 @@ where C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, C::Api: SassafrasApi, E: Environment + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Clone + Sync, L: sc_consensus::JustificationSyncLink, ER: std::error::Error + Send + 'static, @@ -322,13 +322,10 @@ where header: B::Header, header_hash: &B::Hash, body: Vec, - storage_changes: StorageChanges<>::Transaction, B>, + storage_changes: StorageChanges, (_, public): Self::Claim, epoch_descriptor: Self::AuxData, - ) -> Result< - 
sc_consensus::BlockImportParams>::Transaction>, - ConsensusError, - > { + ) -> Result, ConsensusError> { // TODO DAVXY SASS-32: this seal may be revisited. // We already have a VRF signature, this could be completelly redundant. // The header.hash() can be added to the VRF signed data. @@ -610,11 +607,8 @@ where C::Api: SassafrasApi, SC: SelectChain + 'static, EN: Environment + Send + Sync + 'static, - EN::Proposer: Proposer>, - I: BlockImport> - + Send - + Sync - + 'static, + EN::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, L: sc_consensus::JustificationSyncLink + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 5efc6e3f9b220..758068218f4d6 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -235,7 +235,7 @@ where impl SassafrasBlockImport where Block: BlockT, - Inner: BlockImport> + Send + Sync, + Inner: BlockImport + Send + Sync, Inner::Error: Into, Client: HeaderBackend + HeaderMetadata @@ -251,7 +251,7 @@ where /// end up in an inconsistent state and have to resync async fn import_state( &mut self, - mut block: BlockImportParams>, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let parent_hash = *block.header.parent_hash(); @@ -305,7 +305,7 @@ where impl BlockImport for SassafrasBlockImport where Block: BlockT, - Inner: BlockImport> + Send + Sync, + Inner: BlockImport + Send + Sync, Inner::Error: Into, Client: HeaderBackend + HeaderMetadata @@ -316,11 +316,10 @@ where Client::Api: SassafrasApi + ApiExt, { type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; async fn import_block( &mut self, - mut block: BlockImportParams, + mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); let number = *block.header.number(); diff --git 
a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4c03e201ca957..affa0887e4320 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -371,7 +371,7 @@ pub fn import_queue( spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, telemetry: Option, -) -> ClientResult> +) -> ClientResult> where Client: ProvideRuntimeApi + HeaderBackend @@ -381,13 +381,7 @@ where + Sync + 'static, Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, - BI: BlockImport< - Block, - Error = ConsensusError, - Transaction = sp_api::TransactionFor, - > + Send - + Sync - + 'static, + BI: BlockImport + Send + Sync + 'static, SelectChain: sp_consensus::SelectChain + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index ff2d026a38829..4eed31fd21b9e 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -329,8 +329,8 @@ where { async fn verify( &mut self, - mut block: BlockImportParams, - ) -> Result, String> { + mut block: BlockImportParams, + ) -> Result, String> { trace!( target: LOG_TARGET, "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index d30c38d7d2979..ea25275ba5c91 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { version = "24.0.0", default-features = false, path = "../../primi sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -array-bytes = "4.1" +array-bytes = "6.1" sp-core = { version = "21.0.0", path = "../../primitives/core" } env_logger = "0.10" From fb501ebd3ac684bab7aa6f655edec04594696b37 Mon Sep 17 00:00:00 2001 
From: Davide Galassi Date: Fri, 18 Aug 2023 13:06:39 +0200 Subject: [PATCH 61/62] Nitpicks --- bin/node-sassafras/runtime/src/lib.rs | 2 +- frame/sassafras/src/lib.rs | 12 +++---- frame/sassafras/src/mock.rs | 9 +++--- primitives/consensus/sassafras/src/digests.rs | 8 ++--- primitives/consensus/sassafras/src/lib.rs | 6 +--- primitives/consensus/sassafras/src/ticket.rs | 31 ++++++++++++------- primitives/consensus/sassafras/src/vrf.rs | 9 +++--- test-utils/runtime/src/lib.rs | 2 +- 8 files changed, 43 insertions(+), 36 deletions(-) diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs index 683208d158236..f80bf71d2bb96 100644 --- a/bin/node-sassafras/runtime/src/lib.rs +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -381,7 +381,7 @@ impl_runtime_apis! { } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn ring_context() -> Option { + fn ring_context() -> Option { Sassafras::ring_context() } diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 585a21ee3ada2..5fabf2fde6255 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -58,8 +58,8 @@ use frame_system::{ }; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, - vrf, AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, RingContext, Slot, - SlotDuration, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + vrf, AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, Slot, SlotDuration, + TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ @@ -235,7 +235,7 @@ pub mod pallet { /// In practice: Updatable Universal Reference String and the seed. #[pallet::storage] #[pallet::getter(fn ring_context)] - pub type RingVrfContext = StorageValue<_, RingContext>; + pub type RingContext = StorageValue<_, vrf::RingContext>; /// Genesis configuration for Sassafras protocol. 
#[pallet::genesis_config] @@ -258,9 +258,9 @@ pub mod pallet { // TODO: davxy... remove for pallet tests warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); - let ring_ctx = RingContext::new_testing(); + let ring_ctx = vrf::RingContext::new_testing(); warn!(target: LOG_TARGET, "... done"); - RingVrfContext::::set(Some(ring_ctx.clone())); + RingContext::::set(Some(ring_ctx.clone())); } } @@ -368,7 +368,7 @@ pub mod pallet { debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); debug!(target: LOG_TARGET, "LOADING RING CTX"); - let Some(ring_ctx) = RingVrfContext::::get() else { + let Some(ring_ctx) = RingContext::::get() else { return Err("Ring context not initialized".into()) }; debug!(target: LOG_TARGET, "... Loaded"); diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index d19a5cdebacd9..c626aedd6acfb 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -22,8 +22,9 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever, *}; use frame_support::traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}; use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityIndex, AuthorityPair, EpochConfiguration, RingProver, Slot, - TicketBody, TicketEnvelope, TicketId, VrfSignature, + digests::PreDigest, + vrf::{RingProver, VrfSignature}, + AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, }; use sp_core::{ crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, @@ -139,8 +140,8 @@ pub fn new_test_ext_with_pairs( if with_ring_context { ext.execute_with(|| { log::debug!("Building new testing ring context"); - let ring_ctx = RingContext::new_testing(); - RingVrfContext::::set(Some(ring_ctx.clone())); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::set(Some(ring_ctx.clone())); }); } diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 
1971540351d3c..0c77fe8f95fcc 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Sassafras digests structures and helpers. use crate::{ - ticket::TicketClaim, AuthorityId, AuthorityIndex, AuthoritySignature, EpochConfiguration, - Randomness, Slot, VrfSignature, SASSAFRAS_ENGINE_ID, + ticket::TicketClaim, vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, + EpochConfiguration, Randomness, Slot, SASSAFRAS_ENGINE_ID, }; use scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -50,7 +50,7 @@ pub struct NextEpochDescriptor { pub authorities: Vec, /// Epoch randomness. pub randomness: Randomness, - /// Mutable epoch parameters. If not present previous epoch parameters are used. + /// Configurable parameters. If not present previous epoch parameters are used. pub config: Option, } @@ -60,7 +60,7 @@ pub enum ConsensusLog { /// Provides information about the next epoch parameters. #[codec(index = 1)] NextEpochData(NextEpochDescriptor), - /// Disable the authority with given index (TODO @davxy). + /// Disable the authority with given index. #[codec(index = 2)] OnDisabled(AuthorityIndex), } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index ef4a1f2606a06..651e97850b756 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -28,10 +28,6 @@ use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::vec::Vec; pub use sp_consensus_slots::{Slot, SlotDuration}; -pub use sp_core::bandersnatch::{ - ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, - vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, -}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -129,7 +125,7 @@ sp_api::decl_runtime_apis! { /// API necessary for block authorship with Sassafras. 
pub trait SassafrasApi { /// Get ring context to be used for ticket construction and verification. - fn ring_context() -> Option; + fn ring_context() -> Option; /// Submit next epoch validator tickets via an unsigned extrinsic. /// This method returns `false` when creation of the extrinsics fails. diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs index 28ae572b72d71..42d9d64434dd8 100644 --- a/primitives/consensus/sassafras/src/ticket.rs +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -17,7 +17,7 @@ //! Primitives related to tickets. -use crate::RingVrfSignature; +use crate::vrf::RingVrfSignature; use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -26,9 +26,10 @@ pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSign /// Ticket identifier. /// /// Its value is the output of a VRF whose inputs cannot be controlled by the -/// creator of the ticket (refer to [`ticket_id_vrf_input`] parameters). +/// ticket's creator (refer to [`crate::vrf::ticket_id_input`] parameters). /// Because of this, it is also used as the ticket score to compare against -/// the epoch ticket's threshold. +/// the epoch ticket's threshold to decide if the ticket is worth being considered +/// for slot assignment (refer to [`ticket_id_threshold`]). pub type TicketId = u128; /// Ticket data persisted on-chain. @@ -61,14 +62,22 @@ pub struct TicketClaim { pub erased_signature: EphemeralSignature, } -/// Computes the threshold for a given epoch as T = (x*s)/(a*v), where: -/// - x: redundancy factor; -/// - s: number of slots in epoch; -/// - a: max number of attempts; -/// - v: number of validator in epoch. -/// The parameters should be chosen such that T <= 1. -/// If `attempts * validators` is zero then we fallback to T = 0 -// TODO-SASS-P3: this formula must be double-checked... +/// Computes ticket-id maximum allowed value for a given epoch. 
+/// +/// Only ticket identifiers below this threshold should be considered for slot +/// assignment. +/// +/// The value is computed as +/// +/// TicketId::MAX*(redundancy*slots)/(attempts*validators) +/// +/// Where: +/// - `redundancy`: redundancy factor; +/// - `slots`: number of slots in epoch; +/// - `attempts`: max number of tickets attempts per validator; +/// - `validators`: number of validators in epoch. +/// +/// If `attempts * validators = 0` then we return 0. pub fn ticket_id_threshold( redundancy: u32, slots: u32, diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index e32e739a53c87..e26679ba29236 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -17,14 +17,15 @@ //! Utilities related to VRF input, output and signatures. -use crate::{ - Randomness, TicketBody, TicketId, VrfInput, VrfOutput, VrfSignData, SASSAFRAS_ENGINE_ID, -}; +use crate::{Randomness, TicketBody, TicketId, SASSAFRAS_ENGINE_ID}; use scale_codec::Encode; use sp_consensus_slots::Slot; use sp_std::vec::Vec; -pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; +pub use sp_core::bandersnatch::{ + ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, + vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, +}; fn vrf_input_from_data( domain: &[u8], diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 7832902e1a933..058fc4ac493d2 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -694,7 +694,7 @@ impl_runtime_apis! 
{ } impl sp_consensus_sassafras::SassafrasApi for Runtime { - fn ring_context() -> Option { + fn ring_context() -> Option { Sassafras::ring_context() } From ab5a31a1c39301957fea58a675651f0b52c3313b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 24 Aug 2023 19:51:44 +0200 Subject: [PATCH 62/62] Improvements to the digest log management --- client/consensus/sassafras/src/authorship.rs | 35 +++---- .../consensus/sassafras/src/block_import.rs | 20 ++-- client/consensus/sassafras/src/lib.rs | 48 +++++----- client/consensus/sassafras/src/tests.rs | 71 +++++++------- .../consensus/sassafras/src/verification.rs | 94 ++++++++----------- frame/sassafras/src/lib.rs | 80 +++++----------- frame/sassafras/src/mock.rs | 31 +++--- primitives/consensus/sassafras/src/digests.rs | 55 ++++++----- primitives/consensus/sassafras/src/vrf.rs | 14 +-- 9 files changed, 187 insertions(+), 261 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 52386c4715366..6c6f6e54d37ce 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -22,7 +22,7 @@ use super::*; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus_sassafras::{ - digests::PreDigest, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, + digests::SlotClaim, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, TicketEnvelope, TicketId, }; use sp_core::{ @@ -43,7 +43,7 @@ pub(crate) fn claim_slot( epoch: &mut Epoch, maybe_ticket: Option<(TicketId, TicketBody)>, keystore: &KeystorePtr, -) -> Option<(PreDigest, AuthorityId)> { +) -> Option<(SlotClaim, AuthorityId)> { if epoch.authorities.is_empty() { return None } @@ -103,9 +103,9 @@ pub(crate) fn claim_slot( warn!(target: LOG_TARGET, "{:?}", output); } - let pre_digest = PreDigest { authority_idx, slot, vrf_signature, ticket_claim }; + let claim = SlotClaim { authority_idx, slot, vrf_signature, 
ticket_claim }; - Some((pre_digest, authority_id.clone())) + Some((claim, authority_id.clone())) } /// Generate the tickets for the given epoch. @@ -235,7 +235,7 @@ where L: sc_consensus::JustificationSyncLink, ER: std::error::Error + Send + 'static, { - type Claim = (PreDigest, AuthorityId); + type Claim = (SlotClaim, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; type CreateProposer = @@ -313,8 +313,8 @@ where }); } - fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { - vec![::sassafras_pre_digest(claim.0.clone())] + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![DigestItem::from(&claim.0)] } async fn block_import_params( @@ -326,37 +326,24 @@ where (_, public): Self::Claim, epoch_descriptor: Self::AuxData, ) -> Result, ConsensusError> { - // TODO DAVXY SASS-32: this seal may be revisited. - // We already have a VRF signature, this could be completelly redundant. - // The header.hash() can be added to the VRF signed data. - // OR maybe we can maintain this seal but compute it using some of the data in the - // pre-digest - // Another option is to not recompute this signature and push (reuse) the one in the - // pre-digest as the seal let signature = self .keystore - .sign_with( + .bandersnatch_sign( ::ID, - ::CRYPTO_ID, public.as_ref(), header_hash.as_ref(), ) .map_err(|e| ConsensusError::CannotSign(format!("{}. Key {:?}", e, public)))? + .map(|sig| AuthoritySignature::from(sig)) .ok_or_else(|| { ConsensusError::CannotSign(format!( "Could not find key in keystore. 
Key {:?}", public )) })?; - let signature: AuthoritySignature = signature - .clone() - .try_into() - .map_err(|_| ConsensusError::InvalidSignature(signature, public.to_raw_vec()))?; - - let digest_item = ::sassafras_seal(signature); let mut block = BlockImportParams::new(BlockOrigin::Own, header); - block.post_digests.push(digest_item); + block.post_digests.push(DigestItem::from(&signature)); block.body = Some(body); block.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); @@ -397,7 +384,7 @@ where } fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot); + let parent_slot = find_slot_claim::(&slot_info.chain_head).ok().map(|d| d.slot); // TODO-SASS-P2 : clarify this field. In Sassafras this is part of 'self' let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs index 758068218f4d6..1980d8243a5ff 100644 --- a/client/consensus/sassafras/src/block_import.rs +++ b/client/consensus/sassafras/src/block_import.rs @@ -352,9 +352,9 @@ where .remove_intermediate::>(INTERMEDIATE_KEY)? 
.epoch_descriptor; - let pre_digest = find_pre_digest::(&block.header) - .expect("valid headers contain a pre-digest; header has been already verified; qed"); - let slot = pre_digest.slot; + let claim = find_slot_claim::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.into()))?; + let slot = claim.slot; let parent_hash = *block.header.parent_hash(); let parent_header = self @@ -366,9 +366,9 @@ where sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), ) })?; - let parent_slot = find_pre_digest::(&parent_header) - .map(|d| d.slot) - .expect("parent is non-genesis; valid headers contain a pre-digest; header has been already verified; qed"); + let parent_slot = find_slot_claim::(&parent_header) + .map(|claim| claim.slot) + .map_err(|e| ConsensusError::ClientImport(e.into()))?; // Make sure that slot number is strictly increasing if slot <= parent_slot { @@ -409,7 +409,7 @@ where ) })?; - let total_weight = parent_weight + pre_digest.ticket_claim.is_some() as u32; + let total_weight = parent_weight + claim.ticket_claim.is_some() as u32; aux_schema::write_block_weight(hash, total_weight, |values| { block @@ -478,10 +478,10 @@ where let finalized_header = client .header(info.finalized_hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect("finalized headers must exist in db; qed"); + .expect("finalized headers must exist in storage; qed"); - find_pre_digest::(&finalized_header) - .expect("valid blocks have a pre-digest; qed") + find_slot_claim::(&finalized_header) + .expect("valid block header have a slot-claim; qed") .slot }; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index affa0887e4320..5138985a9ea98 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -75,7 +75,7 @@ use sp_runtime::{ // Re-export some primitives. 
pub use sp_consensus_sassafras::{ - digests::{CompatibleDigestItem, ConsensusLog, NextEpochDescriptor, PreDigest}, + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, vrf, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, @@ -104,13 +104,13 @@ pub const INTERMEDIATE_KEY: &[u8] = b"sass1"; /// Errors encountered by the Sassafras routines. #[derive(Debug, thiserror::Error)] pub enum Error { - /// Multiple Sassafras pre-runtime digests - #[error("Multiple pre-runtime digests")] - MultiplePreRuntimeDigests, - /// No Sassafras pre-runtime digest found - #[error("No pre-runtime digest found")] - NoPreRuntimeDigest, - /// Multiple Sassafras epoch change digests + /// Multiple slot claim digests + #[error("Multiple slot-claim digests")] + MultipleSlotClaimDigests, + /// Missing slot claim digest + #[error("No slot-claim digest found")] + MissingSlotClaimDigest, + /// Multiple epoch change digests #[error("Multiple epoch change digests")] MultipleEpochChangeDigests, /// Could not fetch epoch @@ -292,30 +292,33 @@ pub struct SassafrasIntermediate { pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } -/// Extract the Sassafras pre digest from the given header. +/// Extract the Sassafras slot claim from the given header. /// -/// Pre-runtime digests are mandatory, the function will return `Err` if none is found. -fn find_pre_digest(header: &B::Header) -> Result> { +/// Slot claim digest is mandatory, the function will return `Err` if none is found. +fn find_slot_claim(header: &B::Header) -> Result> { if header.number().is_zero() { - // Genesis block doesn't contain a pre digest so let's generate a - // dummy one to not break any invariant in the rest of the code. + // Genesis block doesn't contain a slot-claim so let's generate a + // dummy one just to not break any invariant in the rest of the code.
use sp_core::crypto::VrfSecret; let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); let data = vrf::slot_claim_sign_data(&Default::default(), 0.into(), 0); - let vrf_signature = pair.as_ref().vrf_sign(&data); - return Ok(PreDigest { authority_idx: 0, slot: 0.into(), ticket_claim: None, vrf_signature }) + return Ok(SlotClaim { + authority_idx: 0, + slot: 0.into(), + ticket_claim: None, + vrf_signature: pair.as_ref().vrf_sign(&data), + }) } - let mut pre_digest: Option<_> = None; + let mut claim: Option<_> = None; for log in header.digest().logs() { - trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log); - match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { - (Some(_), true) => return Err(sassafras_err(Error::MultiplePreRuntimeDigests)), - (None, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), - (s, false) => pre_digest = s, + match (log.try_into(), claim.is_some()) { + (Ok(_), true) => return Err(sassafras_err(Error::MultipleSlotClaimDigests)), + (Err(_), _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), + (Ok(c), false) => claim = Some(c), } } - pre_digest.ok_or_else(|| sassafras_err(Error::NoPreRuntimeDigest)) + claim.ok_or_else(|| sassafras_err(Error::MissingSlotClaimDigest)) } /// Extract the Sassafras epoch change digest from the given header, if it exists. 
@@ -324,7 +327,6 @@ fn find_next_epoch_digest( ) -> Result, Error> { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: LOG_TARGET, "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); match (log, epoch_digest.is_some()) { (Some(ConsensusLog::NextEpochData(_)), true) => diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs index 8f8fe89b086a8..7aadfd96458d5 100644 --- a/client/consensus/sassafras/src/tests.rs +++ b/client/consensus/sassafras/src/tests.rs @@ -58,10 +58,7 @@ type TestClient = substrate_test_runtime_client::client::Client< type TestSelectChain = substrate_test_runtime_client::LongestChain; -type TestTransaction = - sc_client_api::TransactionFor; - -type TestBlockImportParams = BlockImportParams; +type TestBlockImportParams = BlockImportParams; type TestViableEpochDescriptor = sc_consensus_epochs::ViableEpochDescriptor; @@ -106,8 +103,7 @@ impl TestProposer { impl Proposer for TestProposer { type Error = TestError; - type Transaction = TestTransaction; - type Proposal = future::Ready, Self::Error>>; + type Proposal = future::Ready, Self::Error>>; type ProofRecording = DisableProofRecording; type Proof = (); @@ -227,7 +223,7 @@ impl TestContext { Self { client, backend, link, block_import, verifier, keystore } } - fn import_block(&mut self, mut params: TestBlockImportParams) -> Hash { + fn import_block(&mut self, mut params: TestBlockImportParams) -> Result { let post_hash = params.post_hash(); if params.post_digests.is_empty() { @@ -239,18 +235,14 @@ impl TestContext { } } - match block_on(self.block_import.import_block(params)).unwrap() { - ImportResult::Imported(_) => (), - _ => panic!("expected block to be imported"), - } - - post_hash + block_on(self.block_import.import_block(params)).map(|ir| match ir { + ImportResult::Imported(_) => post_hash, + _ => panic!("Unexpected outcome"), + 
}) } fn verify_block(&mut self, params: TestBlockImportParams) -> TestBlockImportParams { - let tmp_params = params.clear_storage_changes_and_mutate(); - let tmp_params = block_on(self.verifier.verify(tmp_params)).unwrap(); - tmp_params.clear_storage_changes_and_mutate() + block_on(self.verifier.verify(params)).unwrap() } fn epoch_data(&self, parent_hash: &Hash, parent_number: u64, slot: Slot) -> Epoch { @@ -297,8 +289,8 @@ impl TestContext { let proposer = block_on(self.init(&parent_header)).unwrap(); let slot = slot.unwrap_or_else(|| { - let parent_pre_digest = find_pre_digest::(&parent_header).unwrap(); - parent_pre_digest.slot + 1 + let parent_claim = find_slot_claim::(&parent_header).unwrap(); + parent_claim.slot + 1 }); // TODO DAVXY: here maybe we can use the epoch.randomness??? @@ -311,25 +303,22 @@ impl TestContext { .unwrap() .unwrap(); - let pre_digest = PreDigest { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; - let digest = sp_runtime::generic::Digest { - logs: vec![DigestItem::sassafras_pre_digest(pre_digest)], - }; + let claim = SlotClaim { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; + let digest = sp_runtime::generic::Digest { logs: vec![DigestItem::from(&claim)] }; let mut block = proposer.propose_block(digest); let epoch_descriptor = self.epoch_descriptor(&parent_hash, parent_number, slot); - // Sign the pre-sealed hash of the block and then add it to a digest item. + // Sign the pre-sealed hash of the block and then add it to the digest. 
let hash = block.header.hash(); - let signature = self + let signature: AuthoritySignature = self .keystore .bandersnatch_sign(SASSAFRAS, &public, hash.as_ref()) .unwrap() .unwrap() - .try_into() - .unwrap(); - let seal = DigestItem::sassafras_seal(signature); + .into(); + let seal = DigestItem::from(&signature); block.header.digest_mut().push(seal); let mut params = BlockImportParams::new(BlockOrigin::Own, block.header); @@ -344,7 +333,7 @@ impl TestContext { // This skips verification. fn propose_and_import_block(&mut self, parent_hash: Hash, slot: Option) -> Hash { let params = self.propose_block(parent_hash, slot); - self.import_block(params) + self.import_block(params).unwrap() } // Propose and import n valid blocks that are built on top of the given parent. @@ -444,7 +433,7 @@ fn claim_primary_slots_works() { .tickets_aux .insert(ticket_id, (alice_authority_idx, ticket_secret.clone())); - let (pre_digest, auth_id) = authorship::claim_slot( + let (claim, auth_id) = authorship::claim_slot( 0.into(), &mut epoch, Some((ticket_id, ticket_body.clone())), @@ -453,7 +442,7 @@ fn claim_primary_slots_works() { .unwrap(); assert!(epoch.tickets_aux.is_empty()); - assert_eq!(pre_digest.authority_idx, alice_authority_idx); + assert_eq!(claim.authority_idx, alice_authority_idx); assert_eq!(auth_id, Keyring::Alice.public().into()); // Fail if we have ticket aux data but not the authority key in out keystore @@ -468,19 +457,19 @@ fn claim_primary_slots_works() { } #[test] -#[should_panic(expected = "valid headers contain a pre-digest")] -fn import_rejects_block_without_pre_digest() { +fn import_rejects_block_without_slot_claim() { let mut env = TestContext::new(); let mut import_params = env.propose_block(env.client.info().genesis_hash, Some(999.into())); // Remove logs from the header import_params.header.digest_mut().logs.clear(); - env.import_block(import_params); + let res = env.import_block(import_params); + + assert_eq!(res.unwrap_err().to_string(), "Import failed: No 
slot-claim digest found"); } #[test] -#[should_panic(expected = "Unexpected epoch change")] fn import_rejects_block_with_unexpected_epoch_changes() { let mut env = TestContext::new(); @@ -498,11 +487,12 @@ fn import_rejects_block_with_unexpected_epoch_changes() { let digest = import_params.header.digest_mut(); digest.logs.insert(digest.logs.len() - 1, digest_item); - env.import_block(import_params); + let res = env.import_block(import_params); + + assert_eq!(res.unwrap_err().to_string(), "Import failed: Unexpected epoch change"); } #[test] -#[should_panic(expected = "Expected epoch change to happen")] fn import_rejects_block_with_missing_epoch_changes() { let mut env = TestContext::new(); @@ -516,7 +506,12 @@ fn import_rejects_block_with_missing_epoch_changes() { // (Implementation detail: should be the second to last entry, just before the seal) digest.logs.remove(digest.logs.len() - 2); - env.import_block(import_params); + let res = env.import_block(import_params); + + assert!(res + .unwrap_err() + .to_string() + .contains("Import failed: Expected epoch change to happen")); } #[test] @@ -901,7 +896,7 @@ async fn sassafras_network_progress() { // another babe instance and then tries to build a block in the same slot making // this test fail. let parent_header = client_clone.header(parent).ok().flatten().unwrap(); - let slot = Slot::from(find_pre_digest::(&parent_header).unwrap().slot + 1); + let slot = Slot::from(find_slot_claim::(&parent_header).unwrap().slot + 1); async move { Ok((InherentDataProvider::new(slot),)) } }); let sassafras_params = SassafrasWorkerParams { diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 4eed31fd21b9e..08679331a70e1 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -28,28 +28,12 @@ use sp_core::{ // Allowed slot drift. 
const MAX_SLOT_DRIFT: u64 = 1; -/// Verification parameters -struct VerificationParams<'a, B: 'a + BlockT> { - /// The header being verified. - header: B::Header, - /// The pre-digest of the header being verified. - pre_digest: &'a PreDigest, - /// The slot number of the current time. - slot_now: Slot, - /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. - epoch: &'a Epoch, - /// Origin - origin: BlockOrigin, - /// Expected ticket for this block. - maybe_ticket: Option<(TicketId, TicketBody)>, -} - /// Verified information struct VerifiedHeaderInfo { /// Authority index. authority_id: AuthorityId, - /// Seal found within the header. - seal: DigestItem, + /// Seal digest found within the header. + seal_digest: DigestItem, } /// Check a header has been signed by the right key. If the slot is too far in @@ -62,31 +46,32 @@ struct VerifiedHeaderInfo { /// The given header can either be from a primary or secondary slot assignment, /// with each having different validation logic. fn check_header( - params: VerificationParams, + mut header: B::Header, + claim: &SlotClaim, + slot_now: Slot, + epoch: &Epoch, + origin: BlockOrigin, + maybe_ticket: Option<(TicketId, TicketBody)>, ) -> Result, Error> { - let VerificationParams { mut header, pre_digest, slot_now, epoch, origin, maybe_ticket } = - params; - - let seal = header - .digest_mut() - .pop() - .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; - // Check that the slot is not in the future, with some drift being allowed. 
- if pre_digest.slot > slot_now + MAX_SLOT_DRIFT { - header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot)) + if claim.slot > slot_now + MAX_SLOT_DRIFT { + // header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, claim.slot)) } - let Some(authority_id) = epoch.authorities.get(pre_digest.authority_idx as usize) else { + let Some(authority_id) = epoch.authorities.get(claim.authority_idx as usize) else { return Err(sassafras_err(Error::SlotAuthorNotFound)) }; // Check header signature (aka the Seal) - let signature = seal - .as_sassafras_seal() - .ok_or_else(|| sassafras_err(Error::HeaderBadSeal(header.hash())))?; + let seal_digest = header + .digest_mut() + .pop() + .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + + let signature = AuthoritySignature::try_from(&seal_digest) + .map_err(|_| sassafras_err(Error::HeaderBadSeal(header.hash())))?; let pre_hash = header.hash(); if !AuthorityPair::verify(&signature, &pre_hash, authority_id) { @@ -95,10 +80,9 @@ fn check_header( // Optionally check ticket ownership - let mut sign_data = - vrf::slot_claim_sign_data(&epoch.randomness, pre_digest.slot, epoch.epoch_idx); + let mut sign_data = vrf::slot_claim_sign_data(&epoch.randomness, claim.slot, epoch.epoch_idx); - match (&maybe_ticket, &pre_digest.ticket_claim) { + match (&maybe_ticket, &claim.ticket_claim) { (Some((_ticket_id, ticket_body)), ticket_claim) => { debug!(target: LOG_TARGET, "checking primary"); @@ -110,7 +94,7 @@ fn check_header( ticket_body.attempt_idx, epoch.epoch_idx, ); - let revealed_output = pre_digest + let revealed_output = claim .vrf_signature .outputs .get(1) @@ -136,8 +120,8 @@ fn check_header( }, (None, None) => { debug!(target: LOG_TARGET, "checking secondary"); - let idx = authorship::secondary_authority_index(pre_digest.slot, epoch); - if idx != pre_digest.authority_idx { + let idx = authorship::secondary_authority_index(claim.slot, epoch); + if idx != 
claim.authority_idx { error!(target: LOG_TARGET, "Bad secondary authority index"); return Err(Error::SlotAuthorNotFound) } @@ -150,13 +134,13 @@ fn check_header( } // Check per-slot vrf proof - if !authority_id.as_inner_ref().vrf_verify(&sign_data, &pre_digest.vrf_signature) { + if !authority_id.as_inner_ref().vrf_verify(&sign_data, &claim.vrf_signature) { warn!(target: LOG_TARGET, ">>> VERIFICATION FAILED (pri = {})!!!", maybe_ticket.is_some()); return Err(sassafras_err(Error::VrfVerificationFailed)) } warn!(target: LOG_TARGET, ">>> VERIFICATION OK (pri = {})!!!", maybe_ticket.is_some()); - let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal }; + let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal_digest }; Ok(CheckedHeader::Checked(header, info)) } @@ -367,7 +351,7 @@ where .header_metadata(parent_hash) .map_err(Error::::FetchParentHeader)?; - let pre_digest = find_pre_digest::(&block.header)?; + let claim = find_slot_claim::(&block.header)?; let (checked_header, epoch_descriptor) = { let epoch_changes = self.epoch_changes.shared_data(); @@ -376,7 +360,7 @@ where descendent_query(&*self.client), &parent_hash, parent_header_metadata.number, - pre_digest.slot, + claim.slot, ) .map_err(|e| Error::::ForkTree(Box::new(e)))? 
.ok_or(Error::::FetchEpoch(parent_hash))?; @@ -387,19 +371,18 @@ where let maybe_ticket = self .client .runtime_api() - .slot_ticket(parent_hash, pre_digest.slot) + .slot_ticket(parent_hash, claim.slot) .ok() .unwrap_or_else(|| None); - let verification_params = VerificationParams { - header: block.header.clone(), - pre_digest: &pre_digest, + let checked_header = check_header::( + block.header.clone(), + &claim, slot_now, - epoch: viable_epoch.as_ref(), - origin: block.origin, + viable_epoch.as_ref(), + block.origin, maybe_ticket, - }; - let checked_header = check_header::(verification_params)?; + )?; (checked_header, epoch_descriptor) }; @@ -412,7 +395,7 @@ where if let Err(err) = self .check_and_report_equivocation( slot_now, - pre_digest.slot, + claim.slot, &block.header, &verified_info.authority_id, &block.origin, @@ -437,7 +420,7 @@ where .create_inherent_data() .await .map_err(Error::::CreateInherents)?; - inherent_data.sassafras_replace_inherent_data(&pre_digest.slot); + inherent_data.sassafras_replace_inherent_data(&claim.slot); self.check_inherents( new_block.clone(), parent_hash, @@ -461,8 +444,7 @@ where block.header = pre_header; block.post_hash = Some(hash); - // TODO DAVXY: seal required??? 
- block.post_digests.push(verified_info.seal); + block.post_digests.push(verified_info.seal_digest); block.insert_intermediate( INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }, diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 585a21ee3ada2..51fd3e8ad7a5c 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -57,7 +57,7 @@ use frame_system::{ pallet_prelude::{BlockNumberFor, HeaderFor}, }; use sp_consensus_sassafras::{ - digests::{ConsensusLog, NextEpochDescriptor, PreDigest}, + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, vrf, AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, RingContext, Slot, SlotDuration, TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, }; @@ -188,7 +188,7 @@ pub mod pallet { /// if per-block initialization has already been called for current block. #[pallet::storage] #[pallet::getter(fn initialized)] - pub type Initialized = StorageValue<_, PreDigest>; + pub type Initialized = StorageValue<_, SlotClaim>; /// The configuration for the current epoch. #[pallet::storage] @@ -274,31 +274,23 @@ pub mod pallet { return Weight::zero() } - let pre_digest = >::digest() + let claim = >::digest() .logs .iter() - .filter_map(|digest| { - digest.as_pre_runtime().and_then(|(id, mut data)| { - if id == SASSAFRAS_ENGINE_ID { - PreDigest::decode(&mut data).ok() - } else { - None - } - }) - }) + .filter_map(|item| item.pre_runtime_try_to::(&SASSAFRAS_ENGINE_ID)) .next() - .expect("Valid Sassafras block should have a pre-digest. qed"); + .expect("Valid block must have a slot claim. qed"); - CurrentSlot::::put(pre_digest.slot); + CurrentSlot::::put(claim.slot); // On the first non-zero block (i.e. block #1) this is where the first epoch // (epoch #0) actually starts. We need to adjust internal storage accordingly. 
if *GenesisSlot::::get() == 0 { - debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", pre_digest.slot); - Self::initialize_genesis_epoch(pre_digest.slot) + debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", claim.slot); + Self::initialize_genesis_epoch(claim.slot) } - Initialized::::put(pre_digest); + Initialized::::put(claim); // Enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now); @@ -308,13 +300,13 @@ pub mod pallet { /// Block finalization fn on_finalize(_now: BlockNumberFor) { - // TODO DAVXY: check if is a disabled validator? + // TODO @davxy: check if is a disabled validator? // At the end of the block, we can safely include the new VRF output from // this block into the randomness accumulator. If we've determined // that this block was the first in a new epoch, the changeover logic has // already occurred at this point. - let pre_digest = Initialized::::take() + let claim = Initialized::::take() .expect("Finalization is called after initialization; qed."); let claim_input = vrf::slot_claim_input( @@ -322,7 +314,7 @@ pub mod pallet { CurrentSlot::::get(), EpochIndex::::get(), ); - let claim_output = pre_digest + let claim_output = claim .vrf_signature .outputs .get(0) @@ -334,7 +326,7 @@ pub mod pallet { // If we are in the epoch's second half, we start sorting the next epoch tickets. let epoch_duration = T::EpochDuration::get(); - let current_slot_idx = Self::slot_index(pre_digest.slot); + let current_slot_idx = Self::slot_index(claim.slot); if current_slot_idx >= epoch_duration / 2 { let mut metadata = TicketsMeta::::get(); if metadata.segments_count != 0 { @@ -381,14 +373,14 @@ pub mod pallet { debug!(target: LOG_TARGET, "... Built"); // Check tickets score - let next_auth = NextAuthorities::::get(); - let epoch_config = EpochConfig::::get(); + let next_auth = Self::next_authorities(); + let next_config = Self::next_config().unwrap_or_else(|| Self::config()); // Current slot should be less than half of epoch duration. 
let epoch_duration = T::EpochDuration::get(); let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( - epoch_config.redundancy_factor, + next_config.redundancy_factor, epoch_duration as u32, - epoch_config.attempts_number, + next_config.attempts_number, next_auth.len() as u32, ); @@ -495,7 +487,8 @@ pub mod pallet { fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_tickets { tickets } = call { - // Discard tickets not coming from the local node + // Discard tickets not coming from the local node or that are not + // yet included in a block debug!( target: LOG_TARGET, "Validating unsigned from {} source", @@ -514,8 +507,9 @@ pub mod pallet { // Maybe this is one valid reason to introduce proxies. // In short the question is >>> WHO HAS THE RIGHT TO SUBMIT A TICKET? <<< // A) The current epoch validators - // B) The next epoch validators - // C) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) + // B) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) + // TODO @davxy: maybe we also provide a signed extrinsic to submit tickets + // where the submitter doesn't pay if the tickets are good? warn!( target: LOG_TARGET, "Rejecting unsigned transaction from external sources.", @@ -527,40 +521,12 @@ pub mod pallet { let epoch_duration = T::EpochDuration::get(); let current_slot_idx = Self::current_slot_index(); - if current_slot_idx >= epoch_duration / 2 { + if current_slot_idx > epoch_duration / 2 { warn!(target: LOG_TARGET, "Timeout to propose tickets, bailing out.",); return InvalidTransaction::Stale.into() } - // // Check tickets score - // let next_auth = NextAuthorities::::get(); - // let epoch_config = EpochConfig::::get(); - - // TODO DAVXY - // If we insert the pre-computed id within the body then we can: - // 1. check for equality (not strictly required as far as the output is < threshold) - // 2. 
avoid recompute it in the submit call that will follow... - // Unfortunatelly here we can't discard a subset of the tickets... - // so we have to decide if we want to discard the whole set in presence of "bad - // apples" - // let threshold = sp_consensus_sassafras::compute_ticket_id_threshold( - // epoch_config.redundancy_factor, - // epoch_duration as u32, - // epoch_config.attempts_number, - // next_auth.len() as u32, - // ); - // for ticket in tickets { - // let _preout = ticket.vrf_preout.clone(); - // // TODO DAVXY: here we have to call vrf preout.make_bytes()... - // // Available with thin-vrf. Not available with plain schnorrkel without public - // // key. For now, just set as the preout - // // Check score... - // } - // This should be set such that it is discarded after the first epoch half - // TODO-SASS-P3: double check this. Should we then check again in the extrinsic - // itself? Is this run also just before the extrinsic execution or only on tx queue - // insertion? let tickets_longevity = epoch_duration / 2 - current_slot_idx; let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs index d19a5cdebacd9..cd95ae18cbaf4 100644 --- a/frame/sassafras/src/mock.rs +++ b/frame/sassafras/src/mock.rs @@ -20,9 +20,9 @@ use crate::{self as pallet_sassafras, SameAuthoritiesForever, *}; use frame_support::traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}; -use scale_codec::Encode; +// use scale_codec::Encode; use sp_consensus_sassafras::{ - digests::PreDigest, AuthorityIndex, AuthorityPair, EpochConfiguration, RingProver, Slot, + digests::SlotClaim, AuthorityIndex, AuthorityPair, EpochConfiguration, RingProver, Slot, TicketBody, TicketEnvelope, TicketId, VrfSignature, }; use sp_core::{ @@ -296,27 +296,20 @@ fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { pair.as_ref().vrf_sign(&data) } -/// Produce a `PreDigest` instance for the given 
parameters. -pub fn make_pre_digest( +/// Construct a `PreDigest` instance for the given parameters. +pub fn make_slot_claim( authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair, -) -> PreDigest { +) -> SlotClaim { let vrf_signature = slot_claim_vrf_signature(slot, pair); - PreDigest { authority_idx, slot, vrf_signature, ticket_claim: None } + SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } } -/// Produce a `PreDigest` instance for the given parameters and wrap the result into a `Digest` -/// instance. -pub fn make_wrapped_pre_digest( - authority_idx: AuthorityIndex, - slot: Slot, - pair: &AuthorityPair, -) -> Digest { - let pre_digest = make_pre_digest(authority_idx, slot, pair); - let log = - DigestItem::PreRuntime(sp_consensus_sassafras::SASSAFRAS_ENGINE_ID, pre_digest.encode()); - Digest { logs: vec![log] } +/// Construct a `Digest` with a `SlotClaim` item. +pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } } pub fn initialize_block( @@ -325,7 +318,7 @@ pub fn initialize_block( parent_hash: H256, pair: &AuthorityPair, ) -> Digest { - let digest = make_wrapped_pre_digest(0, slot, pair); + let digest = make_digest(0, slot, pair); System::reset_events(); System::initialize(&number, &parent_hash, &digest); Sassafras::on_initialize(number); @@ -342,7 +335,7 @@ pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { Sassafras::on_finalize(System::block_number()); let parent_hash = System::finalize().hash(); - let digest = make_wrapped_pre_digest(0, slot, pair); + let digest = make_digest(0, slot, pair); System::reset_events(); System::initialize(&number, &parent_hash, &digest); diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 1971540351d3c..3b54f92b65c90 100644 --- 
a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -28,9 +28,11 @@ use scale_info::TypeInfo; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; -/// Sassafras slot assignment pre-digest. +/// Epoch slot claim digest entry. +/// +/// This is mandatory for each block. #[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] -pub struct PreDigest { +pub struct SlotClaim { /// Authority index that claimed the slot. pub authority_idx: AuthorityIndex, /// Corresponding slot number. @@ -43,18 +45,22 @@ pub struct PreDigest { /// Information about the next epoch. /// -/// This is broadcast in the first block of each epoch. +/// This is mandatory in the first block of each epoch. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { /// Authorities list. pub authorities: Vec, /// Epoch randomness. pub randomness: Randomness, - /// Mutable epoch parameters. If not present previous epoch parameters are used. + /// Changeable epoch parameters. + /// + /// If not present previous epoch parameters are used. pub config: Option, } -/// Consensus log item. +/// Runtime digest entries. +/// +/// Entries which may be generated by on-chain code. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { /// Provides information about the next epoch parameters. @@ -65,35 +71,28 @@ pub enum ConsensusLog { OnDisabled(AuthorityIndex), } -/// A digest item which is usable by Sassafras. -pub trait CompatibleDigestItem { - /// Construct a digest item which contains a `PreDigest`. - fn sassafras_pre_digest(seal: PreDigest) -> Self; - - /// If this item is a `PreDigest`, return it. - fn as_sassafras_pre_digest(&self) -> Option; - - /// Construct a digest item which contains an `AuthoritySignature`. - fn sassafras_seal(signature: AuthoritySignature) -> Self; - - /// If this item is an `AuthoritySignature`, return it. 
- fn as_sassafras_seal(&self) -> Option; +impl TryFrom<&DigestItem> for SlotClaim { + type Error = (); + fn try_from(item: &DigestItem) -> Result { + item.pre_runtime_try_to(&SASSAFRAS_ENGINE_ID).ok_or(()) + } } -impl CompatibleDigestItem for DigestItem { - fn sassafras_pre_digest(digest: PreDigest) -> Self { - DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, digest.encode()) +impl From<&SlotClaim> for DigestItem { + fn from(claim: &SlotClaim) -> Self { + DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, claim.encode()) } +} - fn as_sassafras_pre_digest(&self) -> Option { - self.pre_runtime_try_to(&SASSAFRAS_ENGINE_ID) +impl TryFrom<&DigestItem> for AuthoritySignature { + type Error = (); + fn try_from(item: &DigestItem) -> Result { + item.seal_try_to(&SASSAFRAS_ENGINE_ID).ok_or(()) } +} - fn sassafras_seal(signature: AuthoritySignature) -> Self { +impl From<&AuthoritySignature> for DigestItem { + fn from(signature: &AuthoritySignature) -> Self { DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) } - - fn as_sassafras_seal(&self) -> Option { - self.seal_try_to(&SASSAFRAS_ENGINE_ID) - } } diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index e32e739a53c87..12396964a926f 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -17,9 +17,7 @@ //! Utilities related to VRF input, output and signatures. -use crate::{ - Randomness, TicketBody, TicketId, VrfInput, VrfOutput, VrfSignData, SASSAFRAS_ENGINE_ID, -}; +use crate::{Randomness, TicketBody, TicketId, VrfInput, VrfOutput, VrfSignData}; use scale_codec::Encode; use sp_consensus_slots::Slot; use sp_std::vec::Vec; @@ -51,7 +49,11 @@ pub fn slot_claim_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfI /// Signing-data to claim slot ownership during block production. 
pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { let vrf_input = slot_claim_input(randomness, slot, epoch); - VrfSignData::new_unchecked(&SASSAFRAS_ENGINE_ID, Some("slot-claim-transcript"), Some(vrf_input)) + VrfSignData::new_unchecked( + b"sassafras-slot-claim-transcript-v1.0", + Option::<&[u8]>::None, + Some(vrf_input), + ) } /// VRF input to generate the ticket id. @@ -73,8 +75,8 @@ pub fn revealed_key_input(randomness: &Randomness, attempt: u32, epoch: u64) -> /// Data to be signed via ring-vrf. pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput) -> VrfSignData { VrfSignData::new_unchecked( - &SASSAFRAS_ENGINE_ID, - &[b"ticket-body-transcript", ticket_body.encode().as_slice()], + b"sassafras-ticket-body-transcript-v1.0", + Some(ticket_body.encode().as_slice()), Some(ticket_id_input), ) }