Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

52 changes: 41 additions & 11 deletions frame/election-provider-multi-phase/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -311,9 +311,13 @@ pub enum Phase<Bn> {
/// advising validators not to bother running the unsigned offchain worker.
///
/// As validator nodes are free to edit their OCW code, they could simply ignore this advisory
/// and always compute their own solution. However, by default, when the unsigned phase is
/// passive, the offchain workers will not bother running.
Unsigned((bool, Bn)),
/// The emergency phase. This is enabled upon a failing call to `T::ElectionProvider::elect`.
/// After that, the only way to leave this phase is through a successful
/// `T::ElectionProvider::elect`.
Emergency,
}

impl<Bn> Default for Phase<Bn> {
Expand All @@ -323,6 +327,11 @@ impl<Bn> Default for Phase<Bn> {
}

impl<Bn: PartialEq + Eq> Phase<Bn> {
/// Returns `true` if the current phase is [`Phase::Emergency`].
pub fn is_emergency(&self) -> bool {
	match self {
		Phase::Emergency => true,
		_ => false,
	}
}

/// Whether the phase is signed or not.
pub fn is_signed(&self) -> bool {
matches!(self, Phase::Signed)
Expand Down Expand Up @@ -581,7 +590,7 @@ pub mod pallet {
/// Configuration for the fallback
type Fallback: Get<FallbackStrategy>;

/// Origin that can set the minimum score.
/// Origin that can control this pallet.
type ForceOrigin: EnsureOrigin<Self::Origin>;

/// The configuration of benchmarking.
Expand Down Expand Up @@ -793,6 +802,17 @@ pub mod pallet {
<MinimumUntrustedScore<T>>::set(maybe_next_score);
Ok(())
}

/// Set a queued solution while the pallet is in the emergency phase.
///
/// Dispatch origin must be `T::ForceOrigin`, and the pallet must currently be in
/// [`Phase::Emergency`], otherwise `Error::CallNotAllowed` is returned.
///
/// The given `solution` is stored in `QueuedSolution` unconditionally (no feasibility
/// check is performed here — the force origin is fully trusted).
// NOTE(review): presumably `QueuedSolution` is later consumed by `elect` — confirm against
// the rest of the pallet, which is not visible in this chunk.
#[pallet::weight(T::DbWeight::get().reads_writes(1, 1))]
fn set_emergency_election_result(
origin: OriginFor<T>,
solution: ReadySolution<T::AccountId>,
) -> DispatchResult {
// Only the configured force origin may call this.
T::ForceOrigin::ensure_origin(origin)?;
// Only allowed while in the emergency phase.
ensure!(Self::current_phase().is_emergency(), <Error<T>>::CallNotAllowed);
<QueuedSolution<T>>::put(solution);
Ok(())
}
}

#[pallet::event]
Expand Down Expand Up @@ -828,6 +848,8 @@ pub mod pallet {
PreDispatchWeakSubmission,
/// OCW submitted solution for wrong round
OcwCallWrongEra,
/// The call is not allowed at this point.
CallNotAllowed,
}

#[pallet::origin]
Expand Down Expand Up @@ -1162,14 +1184,14 @@ impl<T: Config> Pallet<T> {
/// 1. Increment round.
/// 2. Change phase to [`Phase::Off`]
/// 3. Clear all snapshot data.
fn post_elect() {
// inc round
fn rotate_round() {
// inc round.
<Round<T>>::mutate(|r| *r = *r + 1);

// change phase
// phase is off now.
<CurrentPhase<T>>::put(Phase::Off);

// kill snapshots
// kill snapshots.
Self::kill_snapshot();
}

Expand Down Expand Up @@ -1219,10 +1241,18 @@ impl<T: Config> ElectionProvider<T::AccountId, T::BlockNumber> for Pallet<T> {
type DataProvider = T::DataProvider;

/// Perform the election.
///
/// On success the round is rotated (round incremented, phase set to `Off`, snapshot
/// cleared) and the supports are returned. On failure the pallet enters
/// [`Phase::Emergency`]; the only way out of that phase is a subsequent successful
/// `elect` (e.g. after `set_emergency_election_result` queues a solution).
fn elect() -> Result<(Supports<T::AccountId>, Weight), Self::Error> {
	match Self::do_elect() {
		Ok((supports, weight)) => {
			// All went okay: set the phase to `Off`, clean the snapshot, etc.
			Self::rotate_round();
			Ok((supports, weight))
		},
		Err(why) => {
			// Deliberately do NOT rotate: staying in emergency keeps the snapshot and
			// allows a forced solution to be queued.
			log!(error, "Entering emergency mode.");
			<CurrentPhase<T>>::put(Phase::Emergency);
			Err(why)
		},
	}
}
}

Expand Down
8 changes: 5 additions & 3 deletions primitives/storage/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,12 @@ impl PrefixedStorageKey {

/// Storage data associated to a [`StorageKey`].
#[derive(PartialEq, Eq, RuntimeDebug)]
#[cfg_attr(
	feature = "std",
	derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode, Default)
)]
pub struct StorageData(
	// Hex-(de)serialized via `impl_serde` when `std`/serde support is enabled.
	#[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec<u8>,
);

/// Map of data to use in a storage, it is a collection of
Expand Down
2 changes: 2 additions & 0 deletions utils/frame/remote-externalities/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ env_logger = "0.8.2"
log = "0.4.11"
codec = { package = "parity-scale-codec", version = "2.0.0" }

serde_json = "1.0"

sp-io = { version = "3.0.0", path = "../../../primitives/io" }
sp-core = { version = "3.0.0", path = "../../../primitives/core" }
sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" }
Expand Down
51 changes: 39 additions & 12 deletions utils/frame/remote-externalities/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,12 +114,13 @@ use sp_core::{
};
use codec::{Encode, Decode};
use sp_runtime::traits::Block as BlockT;
use jsonrpsee_ws_client::{WsClientBuilder, WsClient};
use jsonrpsee_ws_client::{WsClientBuilder, WsClient, v2::params::JsonRpcParams, traits::Client};

type KeyPair = (StorageKey, StorageData);

const LOG_TARGET: &str = "remote-ext";
const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io";
const BATCH_SIZE: usize = 512;

jsonrpsee_proc_macros::rpc_client_api! {
RpcApi<B: BlockT> {
Expand Down Expand Up @@ -333,16 +334,28 @@ impl<B: BlockT> Builder<B> {
info!(target: LOG_TARGET, "Querying a total of {} keys", keys.len());

let mut key_values: Vec<KeyPair> = vec![];
for key in keys {
let value =
RpcApi::<B>::get_storage(self.as_online().rpc_client(), key.clone(), Some(at))
.await
.map_err(|e| {
error!(target: LOG_TARGET, "Error = {:?}", e);
"rpc get_storage failed"
})?;
key_values.push((key, value));
if key_values.len() % 1000 == 0 {
let client = self.as_online().rpc_client();
for chunk_keys in keys.chunks(BATCH_SIZE) {
let batch = chunk_keys
.iter()
.cloned()
.map(|key| {
assert!(key.0.len() == 32);
(
"state_getStorage",
JsonRpcParams::Array(vec![serde_json::to_value(key).unwrap()]),
)
})
.collect::<Vec<_>>();
log::trace!(target: LOG_TARGET, "sending batch: {:?}", batch);
let values = client.batch_request::<StorageData>(batch).await.unwrap();
assert_eq!(chunk_keys.len(), values.len());
for (idx, key) in chunk_keys.into_iter().enumerate() {
let value = values[idx].clone();
key_values.push((key.clone(), value));
}

if key_values.len() % (10 * BATCH_SIZE) == 0 {
let ratio: f64 = key_values.len() as f64 / keys_count as f64;
debug!(
target: LOG_TARGET,
Expand Down Expand Up @@ -529,7 +542,21 @@ mod remote_tests {
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
modules: vec!["Proxy".to_owned()],
modules: vec!["System".to_owned()],
..Default::default()
}))
.build()
.await
.expect("Can't reach the remote node. Is it running?")
.execute_with(|| {});
}

#[tokio::test]
async fn can_build_few_pallet() {
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
modules: vec!["Proxy".to_owned(), "Multisig".to_owned(), "Balances".to_owned()],
..Default::default()
}))
.build()
Expand Down