Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
74 commits
Select commit Hold shift + click to select a range
1d5290e
babe: allow skipping epochs in pallet
andresilva Jun 21, 2022
5df7281
babe: detect and skip epochs on client
andresilva Jun 21, 2022
2a52ee5
Merge branch 'master' into andre/babe-skip-epochs
andresilva Sep 13, 2022
cbf16c9
Merge branch 'master' into andre/babe-skip-epochs
andresilva Oct 12, 2022
615fc00
Merge branch 'master' into andre/babe-skip-epochs
andresilva Oct 17, 2022
2641a3f
babe: cleaner epoch util functions
andresilva Oct 18, 2022
04a18ac
babe: add test for runtime handling of skipped epochs
andresilva Oct 18, 2022
402d871
babe: simpler implementation of client handling of skipped epochs
andresilva Oct 18, 2022
e2a58e9
babe: test client-side handling of skipped epochs
andresilva Oct 18, 2022
8cc10da
babe: add comments on client-side skipped epochs
andresilva Oct 18, 2022
b81f940
babe: remove emptyline
andresilva Oct 18, 2022
83332a4
babe: make it resilient to forks
andresilva Oct 18, 2022
1ea9407
babe: typo
andresilva Oct 18, 2022
5bcb243
babe: overflow-safe math
andresilva Oct 24, 2022
7fde70d
babe: add test for skipping epochs across different forks
andresilva Oct 24, 2022
58836a4
Merge branch 'master' into andre/babe-skip-epochs
andresilva Oct 24, 2022
2acfdeb
zombienet: warp-sync integration test added
michalkucharczyk Nov 10, 2022
7c0c9e2
spelling
michalkucharczyk Nov 10, 2022
11ac063
Readme corrected
michalkucharczyk Nov 10, 2022
a843ca1
dir name updated
michalkucharczyk Nov 10, 2022
fa8350c
Check second phase of warp sync
michalkucharczyk Nov 11, 2022
d7a2a75
zombienet pipeline enable + naive test network
michalkucharczyk Nov 14, 2022
0ad17cf
zombienet stage added
michalkucharczyk Nov 14, 2022
eb0f65b
paritypr/substrate-debug image added for zombienet testing
michalkucharczyk Nov 14, 2022
20a109d
debugs added
michalkucharczyk Nov 14, 2022
4e63baa
debugs added
michalkucharczyk Nov 14, 2022
9180782
buildah problem fixed
michalkucharczyk Nov 15, 2022
0e7923f
rollback
michalkucharczyk Nov 15, 2022
df0087c
runner tag
michalkucharczyk Nov 15, 2022
1c52738
test name corrected
michalkucharczyk Nov 15, 2022
05f403f
dir renamed (regex problem)
michalkucharczyk Nov 15, 2022
4307c5d
Merge remote-tracking branch 'origin/master' into mku-basic-warp-sync…
michalkucharczyk Nov 15, 2022
9e1ba5b
common code clean up
michalkucharczyk Nov 15, 2022
e27fbc5
common code clean up
michalkucharczyk Nov 15, 2022
e2867b4
fix
michalkucharczyk Nov 15, 2022
0649849
warp sync test improvements
michalkucharczyk Nov 16, 2022
4d10841
full sha used
michalkucharczyk Nov 16, 2022
cb99176
disable tracing for nodes
michalkucharczyk Nov 16, 2022
37b2167
COMMON_USER -> DOCKERIO_USER
michalkucharczyk Nov 16, 2022
8f1cbe4
refs reworked
michalkucharczyk Nov 16, 2022
8f5f607
paritypr/substrate image used
michalkucharczyk Nov 16, 2022
80de118
DOCKERIO -> DOCKER
michalkucharczyk Nov 16, 2022
6ac5d72
generate-ws-db toml cleanup
michalkucharczyk Nov 16, 2022
5391f96
improvements
michalkucharczyk Nov 18, 2022
c7a33fc
fix
michalkucharczyk Nov 18, 2022
cc7dc23
Merge remote-tracking branch 'origin/master' into mku-basic-warp-sync…
Nov 18, 2022
796a292
Merge remote-tracking branch 'origin/andre/babe-skip-epochs' into mku…
michalkucharczyk Nov 21, 2022
869ab58
Merge remote-tracking branch 'origin/mku-basic-warp-sync-test' into m…
michalkucharczyk Nov 21, 2022
722e75d
zombienet: warp sync test enabled
michalkucharczyk Nov 21, 2022
0a86f12
zombienet version bumped
michalkucharczyk Nov 21, 2022
27ac14a
chain-spec path corrected
michalkucharczyk Nov 21, 2022
7c75b69
epoch duration reduced
michalkucharczyk Nov 21, 2022
bf602ac
raw chain spec
michalkucharczyk Nov 21, 2022
4850ad2
log parsing reworked
michalkucharczyk Nov 21, 2022
614486e
'verification failed' added to log check
michalkucharczyk Nov 21, 2022
b929001
fix
michalkucharczyk Nov 21, 2022
11b41c1
Revert "epoch duration reduced"
michalkucharczyk Nov 21, 2022
4c1f695
versioning test
michalkucharczyk Nov 22, 2022
acadf7b
build debug messages added
michalkucharczyk Nov 22, 2022
698e3f9
raw chain spec used
michalkucharczyk Nov 22, 2022
d9bec53
zombienet v1.3.18 used
michalkucharczyk Nov 22, 2022
9dab437
zombienet: warp sync test enabled
michalkucharczyk Nov 21, 2022
97e8817
chain-spec path corrected
michalkucharczyk Nov 21, 2022
f3e8df2
build debugging: better msg
michalkucharczyk Nov 22, 2022
49ebf53
log parsing improved
michalkucharczyk Nov 21, 2022
6eacafb
warp sync test: removed validators
michalkucharczyk Nov 22, 2022
ab7067c
trigger CI job
michalkucharczyk Nov 22, 2022
ea2cf67
fix
michalkucharczyk Nov 22, 2022
740cd1d
Merge remote-tracking branch 'origin/master' into mku-basic-warp-sync…
michalkucharczyk Nov 22, 2022
d24ce71
review remarks applied
michalkucharczyk Nov 22, 2022
5faed02
Merge remote-tracking branch 'origin/mku-basic-warp-sync-test' into m…
michalkucharczyk Nov 22, 2022
ed41df7
test network with validators
michalkucharczyk Nov 22, 2022
e031b1c
Merge remote-tracking branch 'origin/master' into mku-warp-sync-ci-test
Nov 25, 2022
a752929
Explicitly touch `version.rs` to invalidate the related cache
rcny Dec 1, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ stages:
- test
- build
- publish
- zombienet
- deploy
- notify

Expand All @@ -51,6 +52,7 @@ variables:
BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27"
RUSTY_CACHIER_SINGLE_BRANCH: master
RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true"
ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.18"

default:
retry:
Expand Down Expand Up @@ -183,14 +185,28 @@ default:
- frame/contracts/**/*
- primitives/sandbox/**/*

.publish-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1

.build-refs:
# publish-refs + PRs
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs

.zombienet-refs:
extends: .build-refs

.nightly-pipeline:
rules:
Expand Down Expand Up @@ -222,6 +238,8 @@ include:
- scripts/ci/gitlab/pipeline/build.yml
# publish jobs
- scripts/ci/gitlab/pipeline/publish.yml
# zombienet jobs
- scripts/ci/gitlab/pipeline/zombienet.yml

#### stage: deploy

Expand Down
1 change: 1 addition & 0 deletions bin/node/cli/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ mod cli {
use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};

pub fn main() {
println!("cargo:warning=WARNING build called");
build_shell_completion();
generate_cargo_keys();

Expand Down
46 changes: 44 additions & 2 deletions client/consensus/babe/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1525,11 +1525,12 @@ where
if let Some(next_epoch_descriptor) = next_epoch_digest {
old_epoch_changes = Some((*epoch_changes).clone());

let viable_epoch = epoch_changes
let mut viable_epoch = epoch_changes
.viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
.ok_or_else(|| {
ConsensusError::ClientImport(Error::<Block>::FetchEpoch(parent_hash).into())
})?;
})?
.into_cloned();

let epoch_config = next_config_digest
.map(Into::into)
Expand All @@ -1542,6 +1543,47 @@ where
log::Level::Info
};

if viable_epoch.as_ref().end_slot() <= slot {
// some epochs must have been skipped as our current slot
// fits outside the current epoch. we will figure out
// which epoch it belongs to and we will re-use the same
// data for that epoch
let mut epoch_data = viable_epoch.as_mut();
let skipped_epochs =
*slot.saturating_sub(epoch_data.start_slot) / epoch_data.duration;

// NOTE: notice that we are only updating a local copy of the `Epoch`, this
// makes it so that when we insert the next epoch into `EpochChanges` below
// (after incrementing it), it will use the correct epoch index and start slot.
// we do not update the original epoch that will be re-used because there might
// be other forks (that we haven't imported) where the epoch isn't skipped, and
// to import those forks we want to keep the original epoch data. not updating
// the original epoch works because when we search the tree for which epoch to
// use for a given slot, we will search in-depth with the predicate
// `epoch.start_slot <= slot` which will still match correctly without updating
// `start_slot` to the correct value as below.
let epoch_index = epoch_data.epoch_index.checked_add(skipped_epochs).expect(
"epoch number is u64; it should be strictly smaller than number of slots; \
slots relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.",
);

let start_slot = skipped_epochs
.checked_mul(epoch_data.duration)
.and_then(|skipped_slots| epoch_data.start_slot.checked_add(skipped_slots))
.expect(
"slot number is u64; it should relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.",
);

warn!(target: "babe", "👶 Epoch(s) skipped: from {} to {}",
epoch_data.epoch_index, epoch_index,
);

epoch_data.epoch_index = epoch_index;
epoch_data.start_slot = Slot::from(start_slot);
}

log!(target: "babe",
log_level,
"👶 New epoch {} launching at block {} (block slot {} >= start slot {}).",
Expand Down
256 changes: 256 additions & 0 deletions client/consensus/babe/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ use rand_chacha::{
use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
use sc_consensus::{BoxBlockImport, BoxJustificationImport};
use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
use sc_network_test::{Block as TestBlock, *};
use sp_application_crypto::key_types::BABE;
Expand Down Expand Up @@ -1051,3 +1052,258 @@ fn obsolete_blocks_aux_data_cleanup() {
// Present C4, C5
assert!(aux_data_check(&fork3_hashes, true));
}

#[test]
// Client-side test for skipped-epoch handling: after authoring all blocks of
// epoch 0 and one block of epoch 1, we author a block whose slot falls in
// epoch 3 (skipping epoch 2 entirely). The import pipeline should re-use the
// announced epoch-2 data when asked for a slot in epoch 3, and the block's
// own announcement should correctly describe epoch 4.
fn allows_skipping_epochs() {
// NOTE(review): a single-peer network is enough — no gossip is exercised here.
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
// identity mutator: blocks are proposed unmodified
mutator: Arc::new(|_, _| ()),
};

let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;

// we create all of the blocks in epoch 0 as well as a block in epoch 1
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
BlockId::Number(0),
epoch_length as usize + 1,
);

// the first block in epoch 0 (#1) announces both epoch 0 and 1 (this is a
// special genesis epoch)
let epoch0 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis0,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch0.epoch_index, 0);
// NOTE(review): slots appear to be 1-based here (epoch 0 starts at slot 1)
// — this matches the `epoch_length * N + 1` arithmetic used below.
assert_eq!(epoch0.start_slot, Slot::from(1));

let epoch1 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis1,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch1.epoch_index, 1);
assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1));

// the first block in epoch 1 (#7) announces epoch 2. we will be skipping
// this epoch and therefore re-using its data for epoch 3
let epoch2 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: blocks[epoch_length as usize],
number: epoch_length + 1,
})
.unwrap()
.clone();

assert_eq!(epoch2.epoch_index, 2);
assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1));

// we now author a block that belongs to epoch 3, thereby skipping epoch 2
let last_block = client.expect_header(BlockId::Hash(*blocks.last().unwrap())).unwrap();
let block = propose_and_import_block(
&last_block,
// first slot of epoch 3: forces the import logic to detect the skip
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
);

// and the first block in epoch 3 (#8) announces epoch 4
let epoch4 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: block,
number: epoch_length + 2,
})
.unwrap()
.clone();

assert_eq!(epoch4.epoch_index, 4);
assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1));

// if we try to get the epoch data for a slot in epoch 3
let epoch3 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get back the data for epoch 2: the skipped epoch's data is re-used
// unchanged (its stored index/start slot are NOT rewritten in the tree)
assert_eq!(epoch3, epoch2);

// but if we try to get the epoch data for a slot in epoch 4
let epoch4_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get epoch 4 as expected
assert_eq!(epoch4, epoch4_);
}

#[test]
// Fork-resilience test for skipped-epoch handling: two forks are built on the
// same epoch-1 parent — one that enters epoch 2 normally and one that skips
// straight to epoch 3. Epoch lookups must resolve correctly on BOTH forks,
// i.e. re-using epoch data on the skipping fork must not corrupt the epoch
// data observed by the non-skipping fork.
fn allows_skipping_epochs_on_some_forks() {
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
// identity mutator: blocks are proposed unmodified
mutator: Arc::new(|_, _| ()),
};

let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;

// we create all of the blocks in epoch 0 as well as two blocks in epoch 1
// NOTE(review): the comment says "two blocks in epoch 1" but the count below
// is `epoch_length + 1`, i.e. one block past epoch 0 — confirm intent.
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
BlockId::Number(0),
epoch_length as usize + 1,
);

// we now author a block that belongs to epoch 2, built on top of the last
// authored block in epoch 1.
let last_block = client.expect_header(BlockId::Hash(*blocks.last().unwrap())).unwrap();

// fork A: proceeds into epoch 2 normally (no skip)
let epoch2_block = propose_and_import_block(
&last_block,
Some((epoch_length * 2 + 1).into()),
&mut proposer_factory,
&mut block_import,
);

// if we try to get the epoch data for a slot in epoch 2, we get the data that
// was previously announced when epoch 1 started
let epoch2 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 2 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we now author a block that belongs to epoch 3, built on top of the last
// authored block in epoch 1. authoring this block means we're skipping epoch 2
// entirely on this fork
// fork B: sibling of `epoch2_block`, same parent, but jumps to epoch 3
let epoch3_block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
);

// if we try to get the epoch data for a slot in epoch 3
let epoch3_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get back the data for epoch 2: on the skipping fork the skipped
// epoch's data is re-used as-is
assert_eq!(epoch3_, epoch2);

// if we try to get the epoch data for a slot in epoch 4 in the fork
// where we skipped epoch 2, we should get the epoch data for epoch 4
// that was announced at the beginning of epoch 3
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch3_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

assert!(epoch_data != epoch3_);

// if we try to get the epoch data for a slot in epoch 4 in the fork
// where we didn't skip epoch 2, we should get back the data for epoch 3,
// that was announced when epoch 2 started in that fork
let epoch_data = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&epoch2_block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

assert!(epoch_data != epoch3_);

// cross-check: the data returned on the non-skipping fork is exactly the
// epoch-3 data announced by `epoch2_block`
let epoch3 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: epoch2_block,
number: epoch_length + 2,
})
.unwrap()
.clone();

assert_eq!(epoch_data, epoch3);
}
Loading