......@@ -109,6 +109,7 @@ pub type UncheckedExtrinsic =
/// Migrations to apply on runtime upgrade.
pub type Migrations = (
cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>,
pallet_broker::migration::MigrateV0ToV1<Runtime>,
// permanent
pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
);
......@@ -719,11 +720,20 @@ impl_runtime_apis! {
use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
impl pallet_xcm::benchmarking::Config for Runtime {
type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>;
type DeliveryHelper = (
cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>,
polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
PriceForSiblingParachainDelivery,
RandomParaId,
ParachainSystem,
>
);
fn reachable_dest() -> Option<Location> {
Some(Parent.into())
......@@ -741,8 +751,21 @@ impl_runtime_apis! {
}
/// Returns an `(Asset, Location)` pair usable for benchmarking reserve
/// transfers from the Coretime chain.
///
/// The Coretime chain can reserve-transfer broker regions (non-fungible
/// assets) to a sibling parachain, so a mock region is issued and the
/// benchmark destination is the `RandomParaId` sibling.
fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> {
    // Coretime chain can reserve transfer regions to some random parachain.

    // Properties of a mock region (values are arbitrary for benchmarking):
    let core = 0;
    let begin = 0;
    let end = 42;

    // Issue the region so a real `RegionId` exists in broker storage.
    let region_id = pallet_broker::Pallet::<Runtime>::issue(core, begin, end, None, None);
    Some((
        Asset {
            // Regions are non-fungible assets identified by their index.
            fun: NonFungible(Index(region_id.into())),
            id: AssetId(xcm_config::BrokerPalletLocation::get())
        },
        // Sibling parachain destination, reached via the relay chain.
        ParentThen(Parachain(RandomParaId::get().into()).into()).into(),
    ))
}
fn get_asset() -> Asset {
......@@ -758,15 +781,25 @@ impl_runtime_apis! {
RocRelayLocation::get(),
ExistentialDeposit::get()
).into());
pub const RandomParaId: ParaId = ParaId::new(43211234);
}
impl pallet_xcm_benchmarks::Config for Runtime {
type XcmConfig = xcm_config::XcmConfig;
type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>;
type DeliveryHelper = (
cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>,
polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
PriceForSiblingParachainDelivery,
RandomParaId,
ParachainSystem,
>
);
type AccountIdConverter = xcm_config::LocationToAccountId;
fn valid_destination() -> Result<Location, BenchmarkError> {
Ok(RocRelayLocation::get())
......
......@@ -293,7 +293,7 @@ impl pallet_xcm::Config for Runtime {
type XcmExecuteFilter = Everything;
type XcmExecutor = XcmExecutor<XcmConfig>;
type XcmTeleportFilter = Everything;
type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
type XcmReserveTransferFilter = Everything;
type Weigher = WeightInfoBounds<
crate::weights::xcm::CoretimeRococoXcmWeight<RuntimeCall>,
RuntimeCall,
......
......@@ -109,6 +109,7 @@ pub type UncheckedExtrinsic =
/// Migrations to apply on runtime upgrade.
pub type Migrations = (
cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>,
pallet_broker::migration::MigrateV0ToV1<Runtime>,
// permanent
pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
);
......@@ -218,6 +219,7 @@ impl pallet_authorship::Config for Runtime {
parameter_types! {
pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT;
pub const RandomParaId: ParaId = ParaId::new(43211234);
}
impl pallet_balances::Config for Runtime {
......@@ -710,11 +712,20 @@ impl_runtime_apis! {
use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
impl pallet_xcm::benchmarking::Config for Runtime {
type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>;
type DeliveryHelper = (
cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>,
polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
PriceForSiblingParachainDelivery,
RandomParaId,
ParachainSystem,
>
);
fn reachable_dest() -> Option<Location> {
Some(Parent.into())
......@@ -732,8 +743,21 @@ impl_runtime_apis! {
}
/// Returns an `(Asset, Location)` pair usable for benchmarking reserve
/// transfers from the Coretime chain.
///
/// The Coretime chain can reserve-transfer broker regions (non-fungible
/// assets) to a sibling parachain, so a mock region is issued and the
/// benchmark destination is the `RandomParaId` sibling.
fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> {
    // Coretime chain can reserve transfer regions to some random parachain.

    // Properties of a mock region (values are arbitrary for benchmarking):
    let core = 0;
    let begin = 0;
    let end = 42;

    // Issue the region so a real `RegionId` exists in broker storage.
    let region_id = pallet_broker::Pallet::<Runtime>::issue(core, begin, end, None, None);
    Some((
        Asset {
            // Regions are non-fungible assets identified by their index.
            fun: NonFungible(Index(region_id.into())),
            id: AssetId(xcm_config::BrokerPalletLocation::get())
        },
        // Sibling parachain destination, reached via the relay chain.
        ParentThen(Parachain(RandomParaId::get().into()).into()).into(),
    ))
}
fn get_asset() -> Asset {
......@@ -753,11 +777,22 @@ impl_runtime_apis! {
impl pallet_xcm_benchmarks::Config for Runtime {
type XcmConfig = xcm_config::XcmConfig;
type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>;
type DeliveryHelper = (
cumulus_primitives_utility::ToParentDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
xcm_config::PriceForParentDelivery,
>,
polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper<
xcm_config::XcmConfig,
ExistentialDepositAsset,
PriceForSiblingParachainDelivery,
RandomParaId,
ParachainSystem,
>
);
type AccountIdConverter = xcm_config::LocationToAccountId;
fn valid_destination() -> Result<Location, BenchmarkError> {
Ok(TokenRelayLocation::get())
......
......@@ -300,7 +300,7 @@ impl pallet_xcm::Config for Runtime {
type XcmExecuteFilter = Everything;
type XcmExecutor = XcmExecutor<XcmConfig>;
type XcmTeleportFilter = Everything;
type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
type XcmReserveTransferFilter = Everything;
type Weigher = WeightInfoBounds<
crate::weights::xcm::CoretimeWestendXcmWeight<RuntimeCall>,
RuntimeCall,
......
......@@ -22,8 +22,8 @@ frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/r
frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true }
frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true }
pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false }
pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true }
pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true }
pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false }
pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false }
pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false }
sp-api = { path = "../../../../../substrate/primitives/api", default-features = false }
sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false }
......
......@@ -17,8 +17,8 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive
# Substrate
sp-core = { path = "../../../substrate/primitives/core", default-features = false }
sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false }
sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true }
sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true }
sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true, default-features = false }
sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true, default-features = false }
sp-std = { path = "../../../substrate/primitives/std", default-features = false }
sp-trie = { path = "../../../substrate/primitives/trie", default-features = false }
......@@ -34,6 +34,8 @@ std = [
"scale-info/std",
"sp-core/std",
"sp-inherents/std",
"sp-runtime?/std",
"sp-state-machine?/std",
"sp-std/std",
"sp-trie/std",
]
......@@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install
docker run --rm -it \
-w /polkadot-sdk \
-v $(pwd):/polkadot-sdk \
paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \
docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \
cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain
sudo chown -R $(id -u):$(id -g) target/
```
......
# PRDoc
## Intro
With the merge of [PR #1946](https://github.com/paritytech/polkadot-sdk/pull/1946), a new method for
documenting changes has been introduced: `prdoc`. The [prdoc repository](https://github.com/paritytech/prdoc)
contains more documentation and tooling.
The current document describes how to quickly get started authoring `PRDoc` files.
A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to
record changes on a crate level. This information is then processed by the release team to apply the correct crate
version bumps and to generate the CHANGELOG of the next release.
## Requirements
When creating a PR, the author needs to decide with the `R0` label whether the change (PR) should
appear in the release notes or not.
Labelling a PR with `R0` means that no `PRDoc` is required.
A PR without the `R0` label **does** require a valid `PRDoc` file to be introduced in the PR.
## PRDoc how-to
A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema).
For significant changes, a `.prdoc` file is mandatory and the file must meet the following
requirements:
- file named `pr_NNNN.prdoc` where `NNNN` is the PR number.
For convenience, those files can also contain a short description: `pr_NNNN_foobar.prdoc`.
- located under the [`prdoc` folder](https://github.com/paritytech/polkadot-sdk/tree/master/prdoc) of the repository
- compliant with the [JSON schema](https://json-schema.org/) defined in `prdoc/schema_user.json`
Those requirements can be fulfilled manually without any tooling but a text editor.
## Tooling
Users might find the following helpers convenient:
- Setup VSCode to be aware of the prdoc schema: see [using VSCode](https://github.com/paritytech/prdoc#using-vscode)
- Using the `prdoc` cli to:
- generate a `PRDoc` file from a [template defined in the Polkadot SDK
repo](https://github.com/paritytech/polkadot-sdk/blob/master/prdoc/.template.prdoc) simply providing a PR number
- check the validity of one or more `PRDoc` files
When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The
`R0-silent` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping
the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
## `prdoc` cli usage
## PRDoc How-To
The `prdoc` cli documentation can be found at https://github.com/paritytech/prdoc#prdoc
A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:
tldr:
- `prdoc generate <NNNN>`
- `prdoc check -n <NNNN>`
1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install prdoc`.
1. Open a Pull Request and get the PR number.
1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
[VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check it.
where <NNNN> is the PR number.
> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:
> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
## Pick an audience
## Pick An Audience
While describing a PR, the author needs to consider which audience(s) need to be addressed.
The list of valid audiences is described and documented in the JSON schema as follows:
......@@ -65,7 +41,41 @@ The list of valid audiences is described and documented in the JSON schema as fo
- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain.
## Tips
If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and
re-phrase the changes for each audience.
## Record SemVer Changes
All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells
the release team how to bump the crate version prior to the next release. It is very important that this information is
correct, otherwise it could break the code of downstream teams.
The bump can either be `major`, `minor`, `patch` or `none`. The first three options are defined by
[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other
applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs
are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc
about them.
> **Note**: There is currently no CI in place to sanity check this information, but should be added soon.
### Example
For example when you modified two crates and record the changes:
```yaml
crates:
- name: frame-example
bump: major
- name: frame-example-pallet
bump: minor
```
It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using
`frame-example` might break.
### Dependencies
The PRDoc schema is defined in each repo and usually is quite restrictive.
You cannot simply add a new property to a `PRDoc` file unless the Schema allows it.
A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to
bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change.
`minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest
compatible version.
......@@ -38,7 +38,7 @@
//! // Use nightly formatting.
//! // See the polkadot-sdk CI job that checks formatting for the current version used in
//! // polkadot-sdk.
//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-01-22"],
//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-04-10"],
//! }
//! ```
//!
......@@ -79,7 +79,7 @@
//! # Use nightly formatting.
//! # See the polkadot-sdk CI job that checks formatting for the current version used in
//! # polkadot-sdk.
//! extraArgs = { "+nightly-2024-01-22" },
//! extraArgs = { "+nightly-2024-04-10" },
//! },
//! },
//! ```
......
......@@ -978,6 +978,7 @@ where
woken_block,
woken_candidate,
&subsystem.metrics,
&wakeups,
).await?
}
next_msg = ctx.recv().fuse() => {
......@@ -1152,6 +1153,7 @@ async fn handle_actions<Context>(
candidate_hash,
delayed_approvals_timers,
approval_request,
&wakeups,
)
.await?
.into_iter()
......@@ -1663,6 +1665,7 @@ async fn handle_from_overseer<Context>(
|r| {
let _ = res.send(r);
},
&wakeups,
)
.await?
.0,
......@@ -2477,6 +2480,7 @@ async fn check_and_import_approval<T, Sender>(
metrics: &Metrics,
approval: IndirectSignedApprovalVoteV2,
with_response: impl FnOnce(ApprovalCheckResult) -> T,
wakeups: &Wakeups,
) -> SubsystemResult<(Vec<Action>, T)>
where
Sender: SubsystemSender<RuntimeApiMessage>,
......@@ -2655,6 +2659,7 @@ where
approved_candidate_hash,
candidate_entry,
ApprovalStateTransition::RemoteApproval(approval.validator),
wakeups,
)
.await;
actions.extend(new_actions);
......@@ -2689,6 +2694,10 @@ impl ApprovalStateTransition {
ApprovalStateTransition::WakeupProcessed => false,
}
}
/// `true` iff this state transition was triggered by importing another
/// validator's (remote) approval vote, as opposed to a local approval
/// or a processed wakeup.
fn is_remote_approval(&self) -> bool {
    matches!(*self, ApprovalStateTransition::RemoteApproval(_))
}
}
// Advance the approval state, either by importing an approval vote which is already checked to be
......@@ -2705,6 +2714,7 @@ async fn advance_approval_state<Sender>(
candidate_hash: CandidateHash,
mut candidate_entry: CandidateEntry,
transition: ApprovalStateTransition,
wakeups: &Wakeups,
) -> Vec<Action>
where
Sender: SubsystemSender<RuntimeApiMessage>,
......@@ -2835,6 +2845,43 @@ where
status.required_tranches,
));
if is_approved && transition.is_remote_approval() {
// Make sure we wake other blocks in case they have
// a no-show that might be covered by this approval.
for (fork_block_hash, fork_approval_entry) in candidate_entry
.block_assignments
.iter()
.filter(|(hash, _)| **hash != block_hash)
{
let assigned_on_fork_block = validator_index
.as_ref()
.map(|validator_index| fork_approval_entry.is_assigned(*validator_index))
.unwrap_or_default();
if wakeups.wakeup_for(*fork_block_hash, candidate_hash).is_none() &&
!fork_approval_entry.is_approved() &&
assigned_on_fork_block
{
let fork_block_entry = db.load_block_entry(fork_block_hash);
if let Ok(Some(fork_block_entry)) = fork_block_entry {
actions.push(Action::ScheduleWakeup {
block_hash: *fork_block_hash,
block_number: fork_block_entry.block_number(),
candidate_hash,
// Schedule the wakeup next tick, since the assignment must be a
// no-show, because there is no-wakeup scheduled.
tick: tick_now + 1,
})
} else {
gum::debug!(
target: LOG_TARGET,
?fork_block_entry,
?fork_block_hash,
"Failed to load block entry"
)
}
}
}
}
// We have no need to write the candidate entry if all of the following
// is true:
//
......@@ -2896,6 +2943,7 @@ async fn process_wakeup<Context>(
relay_block: Hash,
candidate_hash: CandidateHash,
metrics: &Metrics,
wakeups: &Wakeups,
) -> SubsystemResult<Vec<Action>> {
let mut span = state
.spans
......@@ -3064,6 +3112,7 @@ async fn process_wakeup<Context>(
candidate_hash,
candidate_entry,
ApprovalStateTransition::WakeupProcessed,
wakeups,
)
.await,
);
......@@ -3294,6 +3343,7 @@ async fn issue_approval<Context>(
candidate_hash: CandidateHash,
delayed_approvals_timers: &mut DelayedApprovalTimer,
ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest,
wakeups: &Wakeups,
) -> SubsystemResult<Vec<Action>> {
let mut issue_approval_span = state
.spans
......@@ -3415,6 +3465,7 @@ async fn issue_approval<Context>(
candidate_hash,
candidate_entry,
ApprovalStateTransition::LocalApproval(validator_index as _),
wakeups,
)
.await;
......
......@@ -834,7 +834,6 @@ impl ChainBuilder {
cur_hash = cur_header.parent_hash;
}
ancestry.reverse();
import_block(
overseer,
ancestry.as_ref(),
......@@ -1922,6 +1921,187 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() {
});
}
/// A candidate that is pending approval on a fork must always have a wakeup
/// scheduled, so approvals arriving in the context of a sibling chain are
/// eventually counted on the fork as well. Exercises both orderings of
/// approval arrival relative to the fork's no-show tick.
#[test]
fn subsystem_always_has_a_wakeup_when_pending() {
    // Approvals sent after all assignments are no-show, the approval
    // should be counted on the fork relay chain on the next tick.
    test_approvals_on_fork_are_always_considered_after_no_show(
        30,
        vec![(29, false), (30, false), (31, true)],
    );
    // Approvals sent before fork no-shows, the approval
    // should be counted on the fork relay chain when it no-shows.
    test_approvals_on_fork_are_always_considered_after_no_show(
        8, // a tick smaller than the no-show tick which is 30.
        vec![(7, false), (8, false), (29, false), (30, true), (31, true)],
    );
}
/// Drives the approval-voting subsystem over two forks (`0x01` and `0x02`)
/// that both include the same candidate, sends the approval only in the
/// context of block `0x01` at `tick_to_send_approval`, and asserts the fork
/// block's `is_fully_approved()` status at each `(tick, expected_status)`
/// pair in `expected_approval_status` as the clock is advanced.
fn test_approvals_on_fork_are_always_considered_after_no_show(
    tick_to_send_approval: Tick,
    expected_approval_status: Vec<(Tick, bool)>,
) {
    let config = HarnessConfig::default();
    let store = config.backend();
    test_harness(config, |test_harness| async move {
        let TestHarness {
            mut virtual_overseer,
            clock,
            sync_oracle_handle: _sync_oracle_handle,
            ..
        } = test_harness;

        // The subsystem starts by querying the finalized block number.
        assert_matches!(
            overseer_recv(&mut virtual_overseer).await,
            AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => {
                rx.send(Ok(0)).unwrap();
            }
        );

        let candidate_hash = Hash::repeat_byte(0x04);
        let candidate_descriptor = make_candidate(ParaId::from(1_u32), &candidate_hash);
        // Shadow with the real hash derived from the descriptor.
        let candidate_hash = candidate_descriptor.hash();

        let block_hash = Hash::repeat_byte(0x01);
        let block_hash_fork = Hash::repeat_byte(0x02);

        let candidate_index = 0;
        let validator = ValidatorIndex(0);
        let validators = vec![
            Sr25519Keyring::Alice,
            Sr25519Keyring::Bob,
            Sr25519Keyring::Charlie,
            Sr25519Keyring::Dave,
            Sr25519Keyring::Eve,
        ];
        // Add block hash 0x01 and fork block 0x02, both children of genesis
        // and both including the same candidate.
        ChainBuilder::new()
            .add_block(
                block_hash,
                ChainBuilder::GENESIS_HASH,
                1,
                BlockConfig {
                    slot: Slot::from(1),
                    candidates: Some(vec![(
                        candidate_descriptor.clone(),
                        CoreIndex(0),
                        GroupIndex(0),
                    )]),
                    session_info: Some(SessionInfo {
                        validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(
                            vec![
                                vec![ValidatorIndex(0), ValidatorIndex(1)],
                                vec![ValidatorIndex(2)],
                                vec![ValidatorIndex(3), ValidatorIndex(4)],
                            ],
                        ),
                        needed_approvals: 1,
                        ..session_info(&validators)
                    }),
                    end_syncing: false,
                },
            )
            .add_block(
                block_hash_fork,
                ChainBuilder::GENESIS_HASH,
                1,
                BlockConfig {
                    slot: Slot::from(1),
                    candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]),
                    session_info: Some(SessionInfo {
                        validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(
                            vec![
                                vec![ValidatorIndex(0), ValidatorIndex(1)],
                                vec![ValidatorIndex(2)],
                                vec![ValidatorIndex(3), ValidatorIndex(4)],
                            ],
                        ),
                        needed_approvals: 1,
                        ..session_info(&validators)
                    }),
                    end_syncing: false,
                },
            )
            .build(&mut virtual_overseer)
            .await;

        // Send assignments for the same candidate on both forks
        let rx = check_and_import_assignment(
            &mut virtual_overseer,
            block_hash,
            candidate_index,
            validator,
        )
        .await;
        assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted));

        let rx = check_and_import_assignment(
            &mut virtual_overseer,
            block_hash_fork,
            candidate_index,
            validator,
        )
        .await;
        assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted));

        // Wake on APPROVAL_DELAY first
        assert!(clock.inner.lock().current_wakeup_is(2));
        clock.inner.lock().set_tick(2);
        futures_timer::Delay::new(Duration::from_millis(100)).await;

        // Wake up on no-show
        assert!(clock.inner.lock().current_wakeup_is(30));

        // Phase 1: ticks before the approval is sent — the fork's status must
        // match the expectations while the main block stays unapproved.
        for (tick, status) in expected_approval_status
            .iter()
            .filter(|(tick, _)| *tick < tick_to_send_approval)
        {
            // Wake up on no-show
            clock.inner.lock().set_tick(*tick);
            futures_timer::Delay::new(Duration::from_millis(100)).await;
            let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap();
            let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap();
            assert!(!block_entry.is_fully_approved());
            assert_eq!(block_entry_fork.is_fully_approved(), *status);
        }

        clock.inner.lock().set_tick(tick_to_send_approval);
        futures_timer::Delay::new(Duration::from_millis(100)).await;

        // Send the approval for candidate just in the context of 0x01 block.
        let rx = check_and_import_approval(
            &mut virtual_overseer,
            block_hash,
            candidate_index,
            validator,
            candidate_hash,
            1,
            false,
            None,
        )
        .await;

        assert_eq!(rx.await, Ok(ApprovalCheckResult::Accepted),);

        // Check approval status for the fork_block is correctly transitioned.
        for (tick, status) in expected_approval_status
            .iter()
            .filter(|(tick, _)| *tick >= tick_to_send_approval)
        {
            // Wake up on no-show
            clock.inner.lock().set_tick(*tick);
            futures_timer::Delay::new(Duration::from_millis(100)).await;
            let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap();
            let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap();
            assert!(block_entry.is_fully_approved());
            assert_eq!(block_entry_fork.is_fully_approved(), *status);
        }
        virtual_overseer
    });
}
#[test]
fn subsystem_process_wakeup_schedules_wakeup() {
test_harness(HarnessConfig::default(), |test_harness| async move {
......
......@@ -657,7 +657,14 @@ async fn validate_candidate_exhaustive(
PrepareJobKind::Compilation,
);
validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await
validation_backend
.validate_candidate(
pvf,
exec_timeout,
params.encode(),
polkadot_node_core_pvf::Priority::Normal,
)
.await
},
PvfExecKind::Approval =>
validation_backend
......@@ -667,6 +674,7 @@ async fn validate_candidate_exhaustive(
params,
executor_params,
PVF_APPROVAL_EXECUTION_RETRY_DELAY,
polkadot_node_core_pvf::Priority::Critical,
)
.await,
};
......@@ -749,10 +757,15 @@ trait ValidationBackend {
pvf: PvfPrepData,
exec_timeout: Duration,
encoded_params: Vec<u8>,
// The priority for the preparation job.
prepare_priority: polkadot_node_core_pvf::Priority,
) -> Result<WasmValidationResult, ValidationError>;
/// Tries executing a PVF for the approval subsystem. Will retry once if an error is encountered
/// that may have been transient.
/// Tries executing a PVF. Will retry once if an error is encountered that may have
/// been transient.
///
/// The `prepare_priority` is relevant in the context of the caller. Currently we expect
/// that `approval` context has priority over `backing` context.
///
/// NOTE: Should retry only on errors that are a result of execution itself, and not of
/// preparation.
......@@ -763,6 +776,8 @@ trait ValidationBackend {
params: ValidationParams,
executor_params: ExecutorParams,
retry_delay: Duration,
// The priority for the preparation job.
prepare_priority: polkadot_node_core_pvf::Priority,
) -> Result<WasmValidationResult, ValidationError> {
let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare);
// Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap.
......@@ -776,8 +791,10 @@ trait ValidationBackend {
// long.
let total_time_start = Instant::now();
let mut validation_result =
self.validate_candidate(pvf.clone(), exec_timeout, params.encode()).await;
// Use `Priority::Critical` as finality trumps parachain liveliness.
let mut validation_result = self
.validate_candidate(pvf.clone(), exec_timeout, params.encode(), prepare_priority)
.await;
if validation_result.is_ok() {
return validation_result
}
......@@ -851,8 +868,9 @@ trait ValidationBackend {
// Encode the params again when re-trying. We expect the retry case to be relatively
// rare, and we want to avoid unconditionally cloning data.
validation_result =
self.validate_candidate(pvf.clone(), new_timeout, params.encode()).await;
validation_result = self
.validate_candidate(pvf.clone(), new_timeout, params.encode(), prepare_priority)
.await;
}
}
......@@ -870,11 +888,13 @@ impl ValidationBackend for ValidationHost {
pvf: PvfPrepData,
exec_timeout: Duration,
encoded_params: Vec<u8>,
// The priority for the preparation job.
prepare_priority: polkadot_node_core_pvf::Priority,
) -> Result<WasmValidationResult, ValidationError> {
let priority = polkadot_node_core_pvf::Priority::Normal;
let (tx, rx) = oneshot::channel();
if let Err(err) = self.execute_pvf(pvf, exec_timeout, encoded_params, priority, tx).await {
if let Err(err) =
self.execute_pvf(pvf, exec_timeout, encoded_params, prepare_priority, tx).await
{
return Err(InternalValidationError::HostCommunication(format!(
"cannot send pvf to the validation host, it might have shut down: {:?}",
err
......
......@@ -368,6 +368,7 @@ impl ValidationBackend for MockValidateCandidateBackend {
_pvf: PvfPrepData,
_timeout: Duration,
_encoded_params: Vec<u8>,
_prepare_priority: polkadot_node_core_pvf::Priority,
) -> Result<WasmValidationResult, ValidationError> {
// This is expected to panic if called more times than expected, indicating an error in the
// test.
......@@ -1044,6 +1045,7 @@ impl ValidationBackend for MockPreCheckBackend {
_pvf: PvfPrepData,
_timeout: Duration,
_encoded_params: Vec<u8>,
_prepare_priority: polkadot_node_core_pvf::Priority,
) -> Result<WasmValidationResult, ValidationError> {
unreachable!()
}
......
......@@ -619,7 +619,7 @@ async fn handle_active_leaf(
// Extract all reversion logs from a header in ascending order.
//
// Ignores logs with number >= the block header number.
// Ignores logs with number > the block header number.
fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
let number = header.number;
let mut logs = header
......@@ -639,14 +639,14 @@ fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
None
},
Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b),
Ok(Some(ConsensusLog::Revert(b))) if b <= number => Some(b),
Ok(Some(ConsensusLog::Revert(b))) => {
gum::warn!(
target: LOG_TARGET,
revert_target = b,
block_number = number,
block_hash = ?header.hash(),
"Block issued invalid revert digest targeting itself or future"
"Block issued invalid revert digest targeting future"
);
None
......
......@@ -966,19 +966,54 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() {
}
#[test]
fn self_and_future_reversions_are_ignored() {
fn self_reversions_are_not_ignored() {
test_harness(|backend, _, mut virtual_overseer| async move {
let finalized_number = 0;
let finalized_hash = Hash::repeat_byte(0);
// F <- A1 <- A2 <- A3.
//
// A3 reverts itself and future blocks. ignored.
// A3 reverts itself
let (_, chain_a) =
construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| {
if h.number == 3 {
add_reversions(h, vec![3])
}
});
let a2_hash = chain_a.iter().rev().nth(1).unwrap().0.hash();
import_blocks_into(
&mut virtual_overseer,
&backend,
Some((finalized_number, finalized_hash)),
chain_a.clone(),
)
.await;
assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h));
assert_leaves(&backend, vec![a2_hash]);
assert_leaves_query(&mut virtual_overseer, vec![a2_hash]).await;
virtual_overseer
});
}
#[test]
fn future_reversions_are_ignored() {
test_harness(|backend, _, mut virtual_overseer| async move {
let finalized_number = 0;
let finalized_hash = Hash::repeat_byte(0);
// F <- A1 <- A2 <- A3.
//
// A3 reverts future blocks. ignored.
let (a3_hash, chain_a) =
construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| {
if h.number == 3 {
add_reversions(h, vec![3, 4, 100])
add_reversions(h, vec![4, 100])
}
});
......@@ -1006,7 +1041,7 @@ fn revert_finalized_is_ignored() {
// F <- A1 <- A2 <- A3.
//
// A3 reverts itself and future blocks. ignored.
// A3 reverts finalized F and its ancestors. ignored.
let (a3_hash, chain_a) =
construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| {
......
......@@ -236,7 +236,7 @@ fn propagate_viability_update(
Ok(())
}
/// Imports a new block and applies any reversions to ancestors.
/// Imports a new block and applies any reversions to ancestors or the block itself.
pub(crate) fn import_block(
backend: &mut OverlayedBackend<impl Backend>,
block_hash: Hash,
......@@ -246,25 +246,29 @@ pub(crate) fn import_block(
weight: BlockWeight,
stagnant_at: Timestamp,
) -> Result<(), Error> {
add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?;
apply_ancestor_reversions(backend, block_hash, block_number, reversion_logs)?;
let block_entry =
add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?;
apply_reversions(backend, block_entry, reversion_logs)?;
Ok(())
}
// Load the given ancestor's block entry, in descending order from the `block_hash`.
// The ancestor_number must be at least one block less than the `block_number`.
// The ancestor_number must be not higher than the `block_entry`'s.
//
// The returned entry will be `None` if the range is invalid or any block in the path had
// no entry present. If any block entry was missing, it can safely be assumed to
// be finalized.
fn load_ancestor(
backend: &mut OverlayedBackend<impl Backend>,
block_hash: Hash,
block_number: BlockNumber,
block_entry: &BlockEntry,
ancestor_number: BlockNumber,
) -> Result<Option<BlockEntry>, Error> {
if block_number <= ancestor_number {
let block_hash = block_entry.block_hash;
let block_number = block_entry.block_number;
if block_number == ancestor_number {
return Ok(Some(block_entry.clone()))
} else if block_number < ancestor_number {
return Ok(None)
}
......@@ -300,7 +304,7 @@ fn add_block(
parent_hash: Hash,
weight: BlockWeight,
stagnant_at: Timestamp,
) -> Result<(), Error> {
) -> Result<BlockEntry, Error> {
let mut leaves = backend.load_leaves()?;
let parent_entry = backend.load_block_entry(&parent_hash)?;
......@@ -308,7 +312,7 @@ fn add_block(
parent_entry.as_ref().and_then(|parent| parent.non_viable_ancestor_for_child());
// 1. Add the block to the DB assuming it's not reverted.
backend.write_block_entry(BlockEntry {
let block_entry = BlockEntry {
block_hash,
block_number,
parent_hash,
......@@ -319,7 +323,8 @@ fn add_block(
approval: Approval::Unapproved,
},
weight,
});
};
backend.write_block_entry(block_entry.clone());
// 2. Update leaves if inherited viability is fine.
if inherited_viability.is_none() {
......@@ -344,26 +349,25 @@ fn add_block(
stagnant_at_list.push(block_hash);
backend.write_stagnant_at(stagnant_at, stagnant_at_list);
Ok(())
Ok(block_entry)
}
/// Assuming that a block is already imported, accepts the number of the block
/// as well as a list of reversions triggered by the block in ascending order.
fn apply_ancestor_reversions(
fn apply_reversions(
backend: &mut OverlayedBackend<impl Backend>,
block_hash: Hash,
block_number: BlockNumber,
block_entry: BlockEntry,
reversions: Vec<BlockNumber>,
) -> Result<(), Error> {
// Note: since revert numbers are in ascending order, the expensive propagation
// of unviability is only heavy on the first log.
for revert_number in reversions {
let maybe_block_entry = load_ancestor(backend, block_hash, block_number, revert_number)?;
if let Some(block_entry) = &maybe_block_entry {
let maybe_block_entry = load_ancestor(backend, &block_entry, revert_number)?;
if let Some(entry) = &maybe_block_entry {
gum::trace!(
target: LOG_TARGET,
?revert_number,
revert_hash = ?block_entry.block_hash,
revert_hash = ?entry.block_hash,
"Block marked as reverted via scraped on-chain reversions"
);
}
......@@ -372,8 +376,8 @@ fn apply_ancestor_reversions(
maybe_block_entry,
None,
revert_number,
Some(block_hash),
Some(block_number),
Some(block_entry.block_hash),
Some(block_entry.block_number),
)?;
}
......
......@@ -17,8 +17,7 @@ cfg-if = "1.0"
futures = "0.3.30"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
is_executable = "1.0.1"
libc = "0.2.152"
is_executable = { version = "1.0.1", optional = true }
pin-project = "1.0.9"
rand = "0.8.5"
slotmap = "1.0"
......@@ -26,7 +25,9 @@ tempfile = "3.3.0"
thiserror = { workspace = true }
tokio = { version = "1.24.2", features = ["fs", "process"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = [
"derive",
] }
polkadot-parachain-primitives = { path = "../../../parachain" }
polkadot-core-primitives = { path = "../../../core-primitives" }
......@@ -37,14 +38,16 @@ polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-primitives = { path = "../../../primitives" }
sp-core = { path = "../../../../substrate/primitives/core" }
sp-wasm-interface = { path = "../../../../substrate/primitives/wasm-interface" }
sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob" }
sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob", optional = true }
polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true }
polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true }
[dev-dependencies]
assert_matches = "1.4.0"
criterion = { version = "0.4.0", default-features = false, features = ["async_tokio", "cargo_bench_support"] }
criterion = { version = "0.4.0", default-features = false, features = [
"async_tokio",
"cargo_bench_support",
] }
hex-literal = "0.4.1"
polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] }
......@@ -57,6 +60,7 @@ adder = { package = "test-parachain-adder", path = "../../../parachain/test-para
halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" }
[target.'cfg(target_os = "linux")'.dev-dependencies]
libc = "0.2.153"
procfs = "0.16.0"
rusty-fork = "0.3.0"
sc-sysinfo = { path = "../../../../substrate/client/sysinfo" }
......@@ -70,6 +74,8 @@ ci-only-tests = []
jemalloc-allocator = ["polkadot-node-core-pvf-common/jemalloc-allocator"]
# This feature is used to export test code to other crates without putting it in the production build.
test-utils = [
"polkadot-node-core-pvf-execute-worker",
"polkadot-node-core-pvf-prepare-worker",
"dep:is_executable",
"dep:polkadot-node-core-pvf-execute-worker",
"dep:polkadot-node-core-pvf-prepare-worker",
"dep:sp-maybe-compressed-blob",
]
......@@ -10,14 +10,16 @@ license.workspace = true
workspace = true
[dependencies]
cfg-if = "1.0"
cpu-time = "1.0.0"
futures = "0.3.30"
gum = { package = "tracing-gum", path = "../../../gum" }
libc = "0.2.152"
nix = { version = "0.27.1", features = ["resource", "sched"] }
thiserror = { workspace = true }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = [
"derive",
] }
polkadot-parachain-primitives = { path = "../../../../parachain" }
polkadot-primitives = { path = "../../../../primitives" }
......@@ -34,7 +36,6 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" }
[target.'cfg(target_os = "linux")'.dependencies]
landlock = "0.3.0"
nix = { version = "0.27.1", features = ["sched"] }
[target.'cfg(all(target_os = "linux", target_arch = "x86_64"))'.dependencies]
seccompiler = "0.4.0"
......